# ceciliaccwei/CMPUT291-proj1 -- project.py
import sqlite3
import getpass
import time
import os
import sys

if len(sys.argv) != 2:
    print("Please run with: python PROJECT.py DATABASE.db")
    quit()
db_file_path = sys.argv[1]
if not (os.path.exists(db_file_path)):
    print("File does not exist!")
    quit()
conn = sqlite3.connect(db_file_path)
c = conn.cursor()


def main():
    conn.commit()
    start()


def start():
    while True:
        print("Welcome to the carpool system!")
        print("1. Login")
        print("2. Sign up")
        print("3. Exit")
        command = raw_input("What would you like to do today?")
        if command == '1':
            login()
        elif command == '2':
            signup()
            continue
        elif command == '3':
            quit()
        else:
            print("Command not found!")


def signup():
    while True:
        email = raw_input("Please enter your email (or BACK): ").lower()
        if email == 'back':
            main()
        c.execute("SELECT * FROM members WHERE email like ?;", (email,))
        dup = c.fetchone()
        if dup == None:
            break
        else:
            print("This email has already been signed up.")
    password = getpass.getpass("Enter your password: ")
    name = raw_input("Please enter your name: ")
    while True:
        try:
            phone = int(raw_input("Please enter your phone number: "))
            break
        except ValueError:
            print("Invalid input!")
            continue
    c.execute("INSERT INTO members VALUES ('%s', '%s', '%s', '%s');" % (email, name, phone, password))
    conn.commit()
    print("You have successfully signed up!")
    main()


def login():
    while True:
        email = raw_input("Please enter your email (or BACK): ").lower()
        if email == 'back':
            break
        c.execute("SELECT * FROM members WHERE email like '%s';" % email)
        username = c.fetchone()
        if username == None:
            print("Username does not exist")
        else:
            password = getpass.getpass("Enter your password: ")
            c.execute("SELECT * FROM members WHERE email like '%s' and pwd = '%s';" % (email, password))
            check_login = c.fetchone()
            if check_login == None:
                print("Incorrect email or password, please try again.")
            else:
                print("Welcome!")
                user = email
                c.execute("SELECT msgTimestamp,sender,rno,content FROM inbox WHERE email like '%s' and seen like '%s';" % (user, 'n'))
                print("".join('%-22s' % x[0] for x in c.description))
                ar = [[str(item) for item in results] for results in c.fetchall()]
                for row in ar:
                    print("".join('%-22s' % x for x in row))
                c.execute("UPDATE inbox SET seen = '%s' where seen like '%s' and email like '%s';" % ('y', 'n', user))
                conn.commit()
                chooseOptionCategory(user)
                break


def chooseOptionCategory(user):
    while True:
        print("1. Rides")
        print("2. Bookings")
        print("3. Requests")
        print("4. Log out")
        print("5. Exit")
        option = raw_input("Your option: ")
        if option == '1':
            RidesRelated(user)
        elif option == '2':
            BookingsRelated(user)
        elif option == '3':
            RequestRelated(user)
        elif option == '4':
            main()
        elif option == '5':
            quit()
        else:
            print("Command not found!")


def RidesRelated(user):
    while True:
        print("1. Offer a ride")
        print("2. Search rides")
        print("3. Go back")
        print("4. Log out")
        print("5. Exit")
        option = raw_input("Your option: ")
        if option == '1':
            offerRide(user)
        elif option == '2':
            searchRides(user)
        elif option == '3':
            break
        elif option == '4':
            main()
        elif option == '5':
            quit()
        else:
            print("Command not found!")


def BookingsRelated(user):
    while True:
        print("1. List all confirmed bookings on my rides")
        print("2. Book someone on my ride")
        print("3. Go back")
        print("4. Log out")
        print("5. 
Exit") option = raw_input("Your option: ") if option == '1': bookingList(user) elif option == '2': rideList(user) elif option == '3': break elif option == '4': main() elif option == '5': quit() else: print("Command not found!") def rideList(user): print(user) c.execute("SELECT r.*, \ r.seats - ifnull(sum(b.seats) ,0) as seats_avaliable \ from rides r \ left join \ bookings b \ on r.rno = b.rno \ where r.driver like ? \ group by r.rno;",(user,)) print ("".join('%-13s'%x[0] for x in c.description)) ar = [[str(item) for item in results] for results in c.fetchall()] prtFive(ar) while True: print("Enter the rno to book") rno = raw_input("Or enter Back to go back: ") try: rno = int(rno) c.execute("SELECT * FROM rides WHERE driver like '%s' and rno='%d';" % (user,rno)) check_ride = c.fetchone() if check_ride == None: print("That's not your ride!") continue else: break except ValueError: return while True: email = raw_input("Please enter the member's email: ").lower() c.execute("SELECT * FROM members WHERE email like ?;",(email,)) exist = c.fetchone() if exist == None: print("Member does not exist!") continue else: break c.execute("SELECT r.seats - ifnull(sum(b.seats) ,0) \ from rides r \ left join \ bookings b \ on r.rno = b.rno \ where r.rno = ? \ group by r.rno;",(rno,)) seats_avaliable = int(c.fetchone()[0]) while True: try: cost = int(raw_input("Please enter your cost: ")) break except ValueError: print("Invalid input!") continue while True: try: seats = int(raw_input("Please enter your seats booked: ")) break except ValueError: print("Invalid input!") continue c.execute("SELECT lcode FROM locations") ar = [[str(item) for item in results] for results in c.fetchall()] pickup = raw_input("Please enter the pickup loc: ") while [pickup] not in ar: searchLoc(pickup) pickup = raw_input("Please enter the pickup loc: ") dropoff = raw_input("Please enter the dropoff loc: ") while [dropoff] not in ar: searchLoc(dst) dropoff = raw_input("Please enter your dst: ") seen = 'n' c.execute("SELECT ifnull(max(bno),0) FROM bookings") bno = int(c.fetchone()[0])+1 if seats <= seats_avaliable: c.execute("INSERT INTO bookings VALUES (?,?,?,?,?,?,?);",(bno,email,rno,cost, int(seats), pickup,dropoff)) content = "your booking: "+str(bno)+ " is confirmed!" msgTimestamp = time.strftime("%Y-%m-%d %H:%M:%S") c.execute("INSERT INTO inbox VALUES (?,?,?,?,?,?);",(email,msgTimestamp,user,content,rno,seen)) conn.commit() print("message sent!") else: option = raw_input("Are you sure to overbook? [Y/N]") if option.upper() == 'Y': c.execute("INSERT INTO bookings VALUES (?,?,?,?,?,?,?);",(bno,email,rno,cost, int(seats), pickup,dropoff)) content = "your booking: "+str(bno)+ " is confirmed!" 
msgTimestamp = time.strftime("%Y-%m-%d %H:%M:%S") c.execute("INSERT INTO inbox VALUES (?,?,?,?,?,?);",(email,msgTimestamp,user,content,rno,seen)) conn.commit() print("message sent!") else: return def bookingList(user): c.execute("SELECT b.* FROM bookings b, rides r\ where b.rno = r.rno and r.driver = ?;",(user,)) print ("".join('%-13s'%x[0] for x in c.description)) ar = [[str(item) for item in results] for results in c.fetchall()] prtFive(ar) print("Enter the bno to cancel") bno = raw_input("Or enter Back to go back: ") if bno.upper() == 'BACK': return else: bno = int(bno) c.execute("SELECT email,rno FROM bookings WHERE bno = ?;",(bno,)) temp = c.fetchone() email = str(temp[0]) rno = int(temp[1]) c.execute("DELETE FROM bookings WHERE bno= ?;", (bno,)) content = "Your booking "+str(bno)+" is cancelled" seen = 'n' msgTimestamp = time.strftime("%Y-%m-%d %H:%M:%S") c.execute("INSERT INTO inbox VALUES (?,?,?,?,?,?);",(email,msgTimestamp,user,content,rno,seen)) conn.commit() print("Booking cancelled!") def RequestRelated(user): while True: print("1. Post a request") print("2. List my own requests") print("3. Search requests") print("4. Go back") print("5. Log out") print("6. Exit") option = raw_input("Your option: ") if option == '1': postRequest(user) elif option == '2': myRequest(user) elif option == '3': searchRequest(user) elif option == '4': break elif option == '5': main() elif option == '6': quit() else: print("Command not found!") def offerRide(user): c.execute("SELECT ifnull(max(rno),0) FROM rides") rno = int(c.fetchone()[0])+1 while True: try: price = float(raw_input("Please enter the price: ")) break except ValueError: print("Invalid input!") continue while True: rdate = raw_input("Please enter the date in YYYY-MM-DD format: ") try: time.strptime(rdate,"%Y-%m-%d") break except ValueError: print("Invalid input!") continue while True: seats = raw_input("Please enter the seats offered: ") try: seats = int(seats) break except ValueError: print("Invalid input!") continue c.execute("SELECT lcode FROM locations") ar = [[str(item) for item in results] for results in c.fetchall()] src = raw_input("Please enter your src: ") while [src] not in ar: searchLoc(src) src = raw_input("Please enter your src: ") dst = raw_input("Please enter your dst: ") while [dst] not in ar: searchLoc(dst) dst = raw_input("Please enter your dst: ") lugDesc = raw_input("Please enter the luggage description: ") while True: cno = raw_input("Please enter the car number: ") if cno == '': cno = None break else: try: cno = int(cno) c.execute("SELECT * FROM cars WHERE owner like '%s' and cno ='%d';" % (user,cno)) check_car = c.fetchone() if check_car == None: print("That's not your car!") c.execute("SELECT * from cars where owner like '%s';"%(user,)) print ("".join('%-20s'%x[0] for x in c.description)) ar = [[str(item) for item in results] for results in c.fetchall()] for row in ar: print ("".join('%-20s'%x for x in row)) continue else: break except ValueError: print("Invalid input!") continue c.execute("INSERT INTO rides VALUES (?,?,?,?,?,?,?,?,?);",(rno,price,rdate,seats,lugDesc,src,dst,user,cno)) conn.commit() c.execute("SELECT lcode FROM locations") ar = [[str(item) for item in results] for results in c.fetchall()] enroute = raw_input("Please enter the enroute location: ") while enroute != '': while [enroute] not in ar: searchLoc(enroute) enroute = raw_input("Please enter your enroute location: ") c.execute("INSERT INTO enroute VALUES (?,?);",(rno,enroute)) enroute = raw_input("Please enter next enroute location: ") 
conn.commit() print("New ride offered!") def searchRides(user): keyword1 = raw_input("Keyword1: ") keyword2 = raw_input("Keyword2: ") if keyword2 != '': keyword3 = raw_input("Keyword3: ") if keyword3 != '': #search by 3 searchbyK1 = searchKeyword(keyword1) searchbyK2 = searchKeyword(keyword2) searchbyK3 = searchKeyword(keyword3) ar = list((set(tuple(i) for i in searchbyK1)&set(tuple(j) for j in searchbyK2)&set(tuple(k) for k in searchbyK3))) else: #search by 2 searchbyK1 = searchKeyword(keyword1) searchbyK2 = searchKeyword(keyword2) ar = list((set(tuple(i) for i in searchbyK1)&set(tuple(j) for j in searchbyK2))) else: #search by 1 ar = searchKeyword(keyword1) description = ['rno', 'price', 'rdate', 'seats', 'lugDesc', 'src', 'dst', 'driver', 'cno', 'make', 'model', 'year', 'seats'] print ("".join('%-13s'%x for x in description)) ar = map(list,ar) ar = sorted(ar, key=lambda x: int(x[0])) prtFive(ar) messageDriver(user) def searchKeyword(keyword): c.execute("SELECT r.*,c.make,c.model,c.year,c.seats FROM rides r,enroute er,locations l \ left join cars c on r.cno = c.cno \ where (er.lcode = l.lcode and er.rno = r.rno)\ and\ (l.lcode like ? or l.city like ? or l.prov like ? or l.address like ?)\ union \ SELECT DISTINCT r.*,c.make,c.model,c.year,c.seats FROM rides r,locations l1, locations l2\ left join cars c on r.cno = c.cno \ WHERE (r.src = l1.lcode and r.dst =l2.lcode)\ and\ (l1.lcode like ? or l1.city like ? or l1.prov like ? or l1.address like ? or \ l2.lcode like ? or l2.city like ? or l2.prov like ? or l2.address like ? );", ('%'+keyword+'%','%'+keyword+'%','%'+keyword+'%','%'+keyword+'%', '%'+keyword+'%','%'+keyword+'%','%'+keyword+'%','%'+keyword+'%', '%'+keyword+'%','%'+keyword+'%','%'+keyword+'%','%'+keyword+'%')) result = [[str(item) for item in results] for results in c.fetchall()] return result def messageDriver(user): while True: rno = raw_input("Please enter the ride where you want to book:(BACK to go back) ") if rno.upper() == 'BACK': return else: try: rno = int(rno) break except ValueError: print("Invalid input!") c.execute("SELECT driver FROM rides WHERE rno = '%d';" % (int(rno))) email = str(c.fetchone()[0]) msgTimestamp = time.strftime("%Y-%m-%d %H:%M:%S") while True: try: cost = int(raw_input("Please enter your cost: ")) break except ValueError: print("Invalid input!") continue while True: try: seats = int(raw_input("Please enter your seats booked: ")) break except ValueError: print("Invalid input!") continue c.execute("SELECT lcode FROM locations") ar = [[str(item) for item in results] for results in c.fetchall()] pickup = raw_input("Please enter the pickup loc: ") while [pickup] not in ar: searchLoc(pickup) pickup = raw_input("Please enter the pickup loc: ") dropoff = raw_input("Please enter the dropoff loc: ") while [dropoff] not in ar: searchLoc(dst) dropoff = raw_input("Please enter your dst: ") content = "cost:"+str(cost) + "; "+"seats:"+str(seats) + "; "+"pickup:"+pickup + "; "+"dropoff: "+dropoff + "; " seen = 'n' c.execute("INSERT INTO inbox VALUES (?,?,?,?,?,?)",(email,msgTimestamp,user,content,rno,seen)) conn.commit() print("message sent!") def postRequest(user): c.execute("SELECT ifnull(max(rid),0) FROM requests") rid = int(c.fetchone()[0])+1 while True: rdate = raw_input("Please enter the date in YYYY-MM-DD format: ") try: time.strptime(rdate,"%Y-%m-%d") break except ValueError: print("Invalid input!") continue c.execute("SELECT lcode FROM locations") ar = [[str(item) for
# tests/001_theoretical/test_004_datetime_blueprint.py
#!/bin/false

# Copyright (c) 2022 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
#     disclaimer.
#  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
#     following disclaimer in the documentation and/or other materials provided with the distribution.
#  3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
#     products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os
import os.path
import sys

if "DATALIDATOR_TESTS_AUTOPATH" in os.environ:
    __TESTS_DIR = os.path.dirname(os.path.realpath(__file__))
    __MODULE_DIR = os.path.realpath(os.path.join(__TESTS_DIR, "../.."))
    if __TESTS_DIR not in sys.path:
        sys.path.insert(0, __TESTS_DIR)
    if __MODULE_DIR not in sys.path:
        sys.path.insert(0, __MODULE_DIR)

import locale
locale.setlocale(locale.LC_ALL, "C")  # Some of DatetimeBlueprint's functionality is locale-dependent under certain circumstances!
import theoretical_testutils import pytest import datetime import zoneinfo import time import ipaddress from datalidator.blueprints.ParsingMode import ParsingMode from datalidator.blueprints.impl.DatetimeBlueprint import DatetimeBlueprint from datalidator.blueprints.exc.InputDataNotConvertibleExc import InputDataNotConvertibleExc from datalidator.blueprints.exc.InputDataTypeNotInAllowlistExc import InputDataTypeNotInAllowlistExc from datalidator.filters.impl.DatetimeAddTimezoneFilter import DatetimeAddTimezoneFilter from datalidator.filters.impl.DatetimeChangeTimezoneFilter import DatetimeChangeTimezoneFilter from datalidator.filters.exc.InputDatetimeObjectIsNaiveInFilterExc import InputDatetimeObjectIsNaiveInFilterExc from datalidator.validators.impl.DatetimeIsAwareValidator import DatetimeIsAwareValidator from datalidator.validators.impl.DatetimeNotAfterValidator import DatetimeNotAfterValidator from datalidator.validators.impl.DatetimeNotBeforeValidator import DatetimeNotBeforeValidator from datalidator.validators.exc.DataValidationFailedExc import DataValidationFailedExc from datalidator.validators.exc.InputDatetimeObjectIsNaiveInValidatorExc import InputDatetimeObjectIsNaiveInValidatorExc from datalidator.validators.exc.err.InvalidValidatorConfigError import InvalidValidatorConfigError current_datetime_naive = datetime.datetime.now() current_datetime_aware = datetime.datetime.now(datetime.timezone.utc).astimezone(zoneinfo.ZoneInfo("Europe/Prague")) current_struct_time_utc = time.gmtime() current_struct_time_localtime = time.localtime() __DATETIME_BLUEPRINT_TEST_SUITE = ( (DatetimeBlueprint(parsing_mode=ParsingMode.MODE_LOOSE), ( (current_datetime_naive, lambda output: (output is not current_datetime_naive) and (output == current_datetime_naive)), (current_datetime_aware, lambda output: (output is not current_datetime_aware) and (output == current_datetime_aware)), (current_datetime_naive, lambda output: output != current_datetime_aware), (current_datetime_aware, lambda output: output != current_datetime_naive), (datetime.datetime(2020, 2, 29, 12, 40, 25), datetime.datetime(2020, 2, 29, 12, 40, 25)), (datetime.datetime(2020, 2, 29, 12, 40, 25), lambda output: output != datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=datetime.timezone.utc)), (datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=datetime.timezone.utc), lambda output: output != datetime.datetime(2020, 2, 29, 12, 40, 25)), (datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=datetime.timezone.utc), datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=datetime.timezone.utc)), (datetime.datetime(2020, 2, 29, 12, 40, 25), lambda output: output != datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=zoneinfo.ZoneInfo("Europe/Prague"))), (datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=zoneinfo.ZoneInfo("Europe/Prague")), lambda output: output != datetime.datetime(2020, 2, 29, 12, 40, 25)), (datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=zoneinfo.ZoneInfo("Europe/Prague")), datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=zoneinfo.ZoneInfo("Europe/Prague"))), (datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=datetime.timezone.utc), lambda output: output != datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=zoneinfo.ZoneInfo("America/New_York"))), (datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=zoneinfo.ZoneInfo("Europe/Prague")), lambda output: output != datetime.datetime(2020, 2, 29, 12, 40, 25, tzinfo=zoneinfo.ZoneInfo("America/New_York"))), (datetime.datetime(1066, 10, 14, 15, 0, 0), datetime.datetime(1066, 10, 14, 15, 0, 
0)), (datetime.datetime(8500, 10, 14, 15, 0, 0), datetime.datetime(8500, 10, 14, 15, 0, 0)), (datetime.datetime(1066, 10, 14, 15, 0, 0, tzinfo=datetime.timezone.utc), datetime.datetime(1066, 10, 14, 15, 0, 0, tzinfo=datetime.timezone.utc)), (datetime.datetime(8500, 10, 14, 15, 0, 0, tzinfo=datetime.timezone.utc), datetime.datetime(8500, 10, 14, 15, 0, 0, tzinfo=datetime.timezone.utc)), (current_struct_time_utc, datetime.datetime.fromtimestamp(time.mktime(current_struct_time_utc), tz=datetime.timezone.utc)), (current_struct_time_localtime, datetime.datetime.fromtimestamp(time.mktime(current_struct_time_localtime), tz=datetime.timezone.utc)), # Hardcoded time.struct_time objects cannot be tested reliably, because their conversion to datetime.datetime is timezone-dependent. (time.struct_time((2020, 1, 1, 1, 1, 1, 1, 1, 1)), lambda output: True), # This just tests whether the blueprint did not raise an exception. (time.struct_time((8020, 1, 1, 1, 1, 1, 1, 1, 1)), lambda output: True), # This just tests whether the blueprint did not raise an exception. (time.struct_time((1066, 1, 1, 1, 1, 1, 1, 1, 1)), InputDataNotConvertibleExc), (time.struct_time((150000000, 1, 1, 1, 1, 1, 1, 1, 1)), InputDataNotConvertibleExc), (time.struct_time((15000, 1, 1, 1, 1, 1, 1, 1, 1)), InputDataNotConvertibleExc), # https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat (current_datetime_naive.isoformat(), current_datetime_naive), (current_datetime_aware.isoformat(), current_datetime_aware), ("2022-01-08T18:38:54.648842", datetime.datetime(2022, 1, 8, 18, 38, 54, 648842)), ("2022-01-08x18:38:54.648842", datetime.datetime(2022, 1, 8, 18, 38, 54, 648842)), ("2022-01-08\x0018:38:54.648842", datetime.datetime(2022, 1, 8, 18, 38, 54, 648842)), ("2011-11", InputDataNotConvertibleExc), ("2011-11-04", datetime.datetime(2011, 11, 4, 0, 0, 0)), ("201-11-04", InputDataNotConvertibleExc), ("20111-11-04", InputDataNotConvertibleExc), ("2011-11-04 01", datetime.datetime(2011, 11, 4, 1, 0, 0)), ("2011-11-04 01:05", datetime.datetime(2011, 11, 4, 1, 5, 0)), ("2011-11-04 01:05:23", datetime.datetime(2011, 11, 4, 1, 5, 23)), ("2011-02-29 01:05:23", InputDataNotConvertibleExc), ("2012-02-29 01:05:23", datetime.datetime(2012, 2, 29, 1, 5, 23)), ("2011-13-01 10:05:50", InputDataNotConvertibleExc), ("2011-11-31 01:05:23", InputDataNotConvertibleExc), ("2011-01-01 24:05:23", InputDataNotConvertibleExc), ("2011-01-01 10:60:23", InputDataNotConvertibleExc), ("2011-01-01 10:05:60", InputDataNotConvertibleExc), ("\r\n2011-11-04 01:05:23\t", datetime.datetime(2011, 11, 4, 1, 5, 23)), (" 2011-11-04 01:05:23 ", datetime.datetime(2011, 11, 4, 1, 5, 23)), ("\x002011-11-04 01:05:23", InputDataNotConvertibleExc), ("2011-11-04 01:05\x00:23", InputDataNotConvertibleExc), ("2011-11-04 01:05:23\x00", datetime.datetime(2011, 11, 4, 1, 5, 23)), # This works for some reason... ("2011-11-04 01:05:23\x00abc", InputDataNotConvertibleExc), # ... and this does not. ("2011-11-04 01:05:23.28", InputDataNotConvertibleExc), ("2011-11-04 01:05:23.283", datetime.datetime(2011, 11, 4, 1, 5, 23, 283000)), ("2011-11-04 01:05:23.2839", InputDataNotConvertibleExc), ("2011-11-04 01:05:23.283999", datetime.datetime(2011, 11, 4, 1, 5, 23, 283999)), ("2011-11-04 01:05:23.2839999", InputDataNotConvertibleExc), ("2011-11-04+05:30", datetime.datetime(2011, 11, 4, 5, 30, 0)), # The '+05:30' is not considered a timezone, but 'hour:minute'!!! 
("2011-11-04 01:05:23+00:00", datetime.datetime(2011, 11, 4, 1, 5, 23, tzinfo=datetime.timezone.utc)), ("2011-11-04 01:05:23.283+00:00", datetime.datetime(2011, 11, 4, 1, 5, 23, 283000, tzinfo=datetime.timezone.utc)), ("2011-11-04 01:05:23.283999+00:00", datetime.datetime(2011, 11, 4, 1, 5, 23, 283999, tzinfo=datetime.timezone.utc)), ("2011-11-04 01:05:23-00:00", datetime.datetime(2011, 11, 4, 1, 5, 23, tzinfo=datetime.timezone.utc)), ("2011-11-04 01:05:23.283-00:00", datetime.datetime(2011, 11, 4, 1, 5, 23, 283000, tzinfo=datetime.timezone.utc)), ("2011-11-04 01:05:23.283999-00:00", datetime.datetime(2011, 11, 4, 1, 5, 23, 283999, tzinfo=datetime.timezone.utc)), ("2011-11-04 01:05:23+05:30", datetime.datetime(2011, 11, 4, 1, 5, 23, tzinfo=datetime.timezone(datetime.timedelta(seconds=19800)))), ("2011-11-04 01:05:23.283+05:30", datetime.datetime(2011, 11, 4, 1, 5, 23, 283000, tzinfo=datetime.timezone(datetime.timedelta(seconds=19800)))), ("2011-11-04 01:05:23.283999+05:30", datetime.datetime(2011, 11, 4, 1, 5, 23, 283999, tzinfo=datetime.timezone(datetime.timedelta(seconds=19800)))), ("2011-11-04 01:05:23-05:30", datetime.datetime(2011, 11, 4, 1, 5, 23, tzinfo=datetime.timezone(datetime.timedelta(seconds=-19800)))), ("2011-11-04 01:05:23.283-05:30", datetime.datetime(2011, 11, 4, 1, 5, 23, 283000, tzinfo=datetime.timezone(datetime.timedelta(seconds=-19800)))), ("2011-11-04 01:05:23.283999-05:30", datetime.datetime(2011, 11, 4, 1, 5, 23, 283999, tzinfo=datetime.timezone(datetime.timedelta(seconds=-19800)))), ("2011-11-04 01:05:23+05:30:10", datetime.datetime(2011, 11, 4, 1, 5, 23, tzinfo=datetime.timezone(datetime.timedelta(seconds=19810)))), ("2011-11-04 01:05:23.283+05:30:10.123", InputDataNotConvertibleExc), ("2011-11-04 01:05:23.283999+05:30:10.123456", datetime.datetime(2011, 11, 4, 1, 5, 23, 283999, tzinfo=datetime.timezone(datetime.timedelta(seconds=19810, microseconds=123456)))), ("2011-11-04 01:05:23-05:30:10", datetime.datetime(2011, 11, 4, 1, 5, 23, tzinfo=datetime.timezone(datetime.timedelta(seconds=-19810)))), ("2011-11-04 01:05:23.283-05:30:10.123", InputDataNotConvertibleExc), ("2011-11-04 01:05:23.283999-05:30:10.123456", datetime.datetime(2011, 11, 4, 1, 5, 23, 283999, tzinfo=datetime.timezone(datetime.timedelta(seconds=-19810, microseconds=-123456)))), ("15.02.2021 04:50:45", InputDataNotConvertibleExc), ("02/15/2021 04:50:45", InputDataNotConvertibleExc), ("02/15/2021 04:50 am", InputDataNotConvertibleExc), ("0", datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)), ("-0", datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)), ("\r\n0000 \t", datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)), ("0.0", datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)), ("-0.0", datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)), ("\r\n0000.000 \t", datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)), ("\x001204295445", InputDataNotConvertibleExc), ("1204295445", datetime.datetime(2008, 2, 29, 14, 30, 45, 0, tzinfo=datetime.timezone.utc)), ("1204295445.123", datetime.datetime(2008, 2, 29, 14, 30, 45, 123000, tzinfo=datetime.timezone.utc)), ("1204295445.123456", datetime.datetime(2008, 2, 29, 14, 30, 45, 123456, tzinfo=datetime.timezone.utc)), ("-5000000000", datetime.datetime(1811, 7, 23, 15, 6, 40, tzinfo=datetime.timezone.utc)), ("-5000000000.1", datetime.datetime(1811, 7, 23, 15, 6, 39, 900000, tzinfo=datetime.timezone.utc)), ("-100000000000.1", InputDataNotConvertibleExc), # 
ValueError ("-100000000000000000", InputDataNotConvertibleExc), # OSError ("-100000000000000000000", InputDataNotConvertibleExc), # OverflowError ("100000000000", datetime.datetime(5138, 11, 16, 9, 46, 40, tzinfo=datetime.timezone.utc)), ("100000000000000.1", InputDataNotConvertibleExc), # ValueError ("100000000000000000", InputDataNotConvertibleExc), # OSError ("100000000000000000000", InputDataNotConvertibleExc), # OverflowError ("", InputDataNotConvertibleExc), ("abcdefxyz", InputDataNotConvertibleExc), ("\x00", InputDataNotConvertibleExc), ("řeřicha", InputDataNotConvertibleExc), ("nan", InputDataNotConvertibleExc), ("-inf", InputDataNotConvertibleExc), (0, datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)), (-0, datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)), (0.0, datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)), (-0.0, datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)), (1204295445, datetime.datetime(2008, 2, 29, 14, 30, 45, 0, tzinfo=datetime.timezone.utc)), (1204295445.123, datetime.datetime(2008, 2, 29, 14, 30, 45, 123000, tzinfo=datetime.timezone.utc)), (1204295445.123456, datetime.datetime(2008, 2, 29, 14, 30, 45, 123456, tzinfo=datetime.timezone.utc)), (-5000000000, datetime.datetime(1811, 7, 23, 15, 6, 40, tzinfo=datetime.timezone.utc)), (-5000000000.1, datetime.datetime(1811, 7, 23, 15, 6, 39, 900000, tzinfo=datetime.timezone.utc)), (-100000000000.1, InputDataNotConvertibleExc), # ValueError (-100000000000000000, InputDataNotConvertibleExc), # OSError (-100000000000000000000, InputDataNotConvertibleExc), # OverflowError (100000000000, datetime.datetime(5138, 11, 16, 9, 46, 40, tzinfo=datetime.timezone.utc)), (100000000000000.1, InputDataNotConvertibleExc), # ValueError (100000000000000000, InputDataNotConvertibleExc), # OSError (100000000000000000000, InputDataNotConvertibleExc), # OverflowError (float("nan"), InputDataNotConvertibleExc), (float("-inf"), InputDataNotConvertibleExc), (datetime.date(100, 2, 2), datetime.datetime(100, 2, 2, 0, 0, 0)), (datetime.date(2012, 2, 29), datetime.datetime(2012, 2, 29, 0, 0, 0)), (datetime.date(8000, 2, 2), datetime.datetime(8000, 2,
options detected ' + 'for databases {0} and {1}, same pid {2}, skipping {0}'.format(instance, duplicate_instance, pid)) pgcon.close() return True # now we have all components to create a cluster descriptor desc = make_cluster_desc(name=instance, version=dbver, workdir=work_directory, pid=pid, pgcon=pgcon, conn=conn) clusters.append(desc) return True def make_cluster_desc(name, version, workdir, pid, pgcon, conn): """Create cluster descriptor, complete with the reconnect function.""" def reconnect(): pgcon = psycopg2.connect(**conn) pid = read_postmaster_pid(workdir, name) return (pgcon, pid) return { 'name': name, 'ver': version, 'wd': workdir, 'pid': pid, 'pgcon': pgcon, 'reconnect': reconnect } class ProcNetParser(): """ Parse /proc/net/{tcp,tcp6,unix} and return the list of address:port pairs given the set of socket descriptors belonging to the object. The result is grouped by the socket type in a dictionary. """ NET_UNIX_FILENAME = '/proc/net/unix' NET_TCP_FILENAME = '/proc/net/tcp' NET_TCP6_FILENAME = '/proc/net/tcp6' def __init__(self): self.reinit() def reinit(self): self.sockets = {} self.unix_socket_header_len = 0 # initialize the sockets hash with the contents of unix # and tcp sockets. tcp IPv6 is also read if it's present for fname in (ProcNetParser.NET_UNIX_FILENAME, ProcNetParser.NET_TCP_FILENAME): self.read_socket_file(fname) if os.access(ProcNetParser.NET_TCP6_FILENAME, os.R_OK): self.read_socket_file(ProcNetParser.NET_TCP6_FILENAME) @staticmethod def _hex_to_int_str(val): return str(int(val, 16)) @staticmethod def _hex_to_ip(val): newval = format(socket.ntohl(int(val, 16)), '08X') return '.'.join([str(int(newval[i: i + 2], 16)) for i in range(0, 8, 2)]) @staticmethod def _hex_to_ipv6(val): newval_list = [format(socket.ntohl(int(val[x: x + 8], 16)), '08X') for x in range(0, 32, 8)] return ':'.join([':'.join((x[:4], x[4:])) for x in newval_list]) def match_socket_inodes(self, inodes): """ return the dictionary with socket types as strings, containing addresses (or unix path names) and port """ result = {} for inode in inodes: if inode in self.sockets: addr_tuple = self.parse_single_line(inode) if addr_tuple is None: continue socket_type = addr_tuple[0] if socket_type in result: result[socket_type].append(addr_tuple[1:]) else: result[socket_type] = [addr_tuple[1:]] return result def read_socket_file(self, filename): """ read file content, produce a dict of socket inode -> line """ socket_type = filename.split('/')[-1] try: with open(filename) as fp: data = fp.readlines() except os.error as e: logger.error('unable to read from {0}: OS reported {1}'.format(filename, e)) # remove the header header = (data.pop(0)).split() if socket_type == 'unix': self.unix_socket_header_len = len(header) indexes = [i for i, name in enumerate(header) if name.lower() == 'inode'] if len(indexes) != 1: logger.error('attribute \'inode\' in the header of {0} is not unique or missing: {1}'.format( filename, header)) else: inode_idx = indexes[0] if socket_type != 'unix': # for a tcp socket, 2 pairs of fields (tx_queue:rx_queue and tr:tm->when # are separated by colons and not spaces) inode_idx -= 2 for line in data: fields = line.split() inode = int(fields[inode_idx]) self.sockets[inode] = [socket_type, line] def parse_single_line(self, inode): """ apply socket-specific parsing rules """ result = None (socket_type, line) = self.sockets[inode] if socket_type == 'unix': # we are interested in everything in the last field # note that it may contain spaces or other separator characters fields = 
line.split(None, self.unix_socket_header_len - 1) socket_path = fields[-1] # check that it looks like a PostgreSQL socket match = re.search(r'(.*?)/\.s\.PGSQL\.(\d+)$', socket_path) if match: # path - port result = (socket_type,) + match.groups(1) else: logger.warning('unix socket name is not recognized as belonging to PostgreSQL: {0}'.format(socket_path)) else: address_port = line.split()[1] (address_hex, port_hex) = address_port.split(':') port = self._hex_to_int_str(port_hex) if socket_type == 'tcp6': address = self._hex_to_ipv6(address_hex) elif socket_type == 'tcp': address = self._hex_to_ip(address_hex) else: logger.error('unrecognized socket type: {0}'.format(socket_type)) result = (socket_type, address, port) return result def main(): global TICK_LENGTH, logger, options # bail out if we are not running Linux if platform.system() != 'Linux': print('Non Linux database hosts are not supported at the moment. Can not continue') sys.exit(243) if not psycopg2_available: print('Unable to import psycopg2 module, please, install it (python-psycopg2). Can not continue') sys.exit(254) options, args = parse_args() TICK_LENGTH = options.tick output_method = options.output_method if not output_method_is_valid(output_method): print('Unsupported output method: {0}'.format(output_method)) print('Valid output methods are: {0}'.format(','.join(get_valid_output_methods()))) sys.exit(1) if output_method == OUTPUT_METHOD.curses and not curses_available: print('Curses output is selected, but curses are unavailable, falling back to console output') output_method == OUTPUT_METHOD.console # set basic logging if options.log_file: LOG_FILE_NAME = options.log_file # truncate the former logs with open(LOG_FILE_NAME, 'w'): pass logging.basicConfig(format='%(levelname)s: %(asctime)-15s %(message)s', filename=LOG_FILE_NAME) else: logging.basicConfig(format='%(levelname)s: %(asctime)-15s %(message)s') logger = logging.getLogger(__name__) logger.setLevel((logging.INFO if options.verbose else logging.ERROR)) log_stderr = logging.StreamHandler() logger.addHandler(log_stderr) user_dbname = options.instance user_dbver = options.version clusters = [] # now try to read the configuration file config = (read_configuration(options.config_file) if options.config_file else None) if config: for instance in config: if user_dbname and instance != user_dbname: continue # pass already aquired connections to make sure we only list unique clusters. 
host = config[instance].get('host') port = config[instance].get('port') conn = build_connection(host, port, config[instance].get('user'), config[instance].get('dbname')) if not establish_user_defined_connection(instance, conn, clusters): logger.error('failed to acquire details about ' + 'the database cluster {0}, the server will be skipped'.format(instance)) elif options.host: port = options.port or "5432" # try to connet to the database specified by command-line options conn = build_connection(options.host, options.port, options.username, options.dbname) instance = options.instance or "default" if not establish_user_defined_connection(instance, conn, clusters): logger.error("unable to continue with cluster {0}".format(instance)) else: # do autodetection postmasters = get_postmasters_directories() # get all PostgreSQL instances for result_work_dir, data in postmasters.items(): (ppid, dbver, dbname) = data # if user requested a specific database name and version - don't try to connect to others if user_dbname: if dbname != user_dbname or not result_work_dir or not ppid: continue if user_dbver is not None and dbver != user_dbver: continue try: conndata = detect_db_connection_arguments(result_work_dir, ppid, dbver) if conndata is None: continue host = conndata['host'] port = conndata['port'] conn = build_connection(host, port, options.username, options.dbname) pgcon = psycopg2.connect(**conn) except Exception as e: logger.error('PostgreSQL exception {0}'.format(e)) pgcon = None if pgcon: desc = make_cluster_desc(name=dbname, version=dbver, workdir=result_work_dir, pid=ppid, pgcon=pgcon, conn=conn) clusters.append(desc) collectors = [] groups = {} try: if len(clusters) == 0: logger.error('No suitable PostgreSQL instances detected, exiting...') logger.error('hint: use -v for details, ' + 'or specify connection parameters manually in the configuration file (-c)') sys.exit(1) # initialize the disks stat collector process and create an exchange queue q = JoinableQueue(1) work_directories = [cl['wd'] for cl in clusters if 'wd' in cl] collector = DetachedDiskStatCollector(q, work_directories) collector.start() consumer = DiskCollectorConsumer(q) collectors.append(HostStatCollector()) collectors.append(SystemStatCollector()) collectors.append(MemoryStatCollector()) for cl in clusters: part = PartitionStatCollector(cl['name'], cl['ver'], cl['wd'], consumer) pg = PgstatCollector(cl['pgcon'], cl['reconnect'], cl['pid'], cl['name'], cl['ver'], options.pid) groupname = cl['wd'] groups[groupname] = {'pg': pg, 'partitions': part} collectors.append(part) collectors.append(pg) # we don't want to mix diagnostics messages with useful output, so we log the former into a file. logger.removeHandler(log_stderr) loop(collectors, consumer, groups, output_method) logger.addHandler(log_stderr) except KeyboardInterrupt: pass except curses.error: print(traceback.format_exc()) if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ: print('Unable to initialize curses. 
Make sure you supply -t option (force psedo-tty allocation) to ssh') except: print(traceback.format_exc()) finally: sys.exit(0) class DetachedDiskStatCollector(Process): """ This class runs in a separate process and runs du and df """ def __init__(self, q, work_directories): super(DetachedDiskStatCollector, self).__init__() self.work_directories = work_directories self.q = q self.daemon = True self.df_cache = {} def run(self): while True: # wait until the previous data is consumed self.q.join() result = {} self.df_cache = {} for wd in self.work_directories: du_data = self.get_du_data(wd) df_data = self.get_df_data(wd) result[wd] = [du_data, df_data] self.q.put(result) time.sleep(TICK_LENGTH) def get_du_data(self, wd): data_size = 0 xlog_size = 0 result = {'data': [], 'xlog': []} try: data_size = self.run_du(wd, BLOCK_SIZE) xlog_size = self.run_du(wd + '/pg_xlog/', BLOCK_SIZE) except Exception as e: logger.error('Unable to read free space information for the pg_xlog and data directories for the directory\ {0}: {1}'.format(wd, e)) else: # XXX: why do we pass the block size there? result['data'] = str(data_size), wd result['xlog'] = str(xlog_size), wd + '/pg_xlog' return result @staticmethod def run_du(pathname, block_size=BLOCK_SIZE, exclude=['lost+found']): size = 0 folders = [pathname] root_dev = os.lstat(pathname).st_dev while len(folders): c = folders.pop() for e in os.listdir(c): e = os.path.join(c, e) try: st = os.lstat(e) except os.error: # don't care about files removed while we are trying to read them. continue # skip data on different partition if st.st_dev != root_dev: continue mode = st.st_mode & 0xf000 # S_IFMT if mode == 0x4000: # S_IFDIR if e in exclude: continue folders.append(e) size += st.st_size if mode == 0x8000: # S_IFREG size += st.st_size return long(size / block_size)
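# --- Editor's sketch (not part of the original module): run_du() above filters
# directory entries with raw mode masks -- 0xf000 keeps the file-type bits (what
# stat.S_IFMT() extracts), 0x4000 is stat.S_IFDIR and 0x8000 is stat.S_IFREG.
# The hypothetical helper below, du_bytes(), expresses the same traversal with
# the standard `stat` helpers; names and the byte-based return value are mine.
import os
import stat


def du_bytes(pathname, exclude=('lost+found',)):
    """Sum apparent sizes of entries under `pathname`, staying on one device."""
    total = 0
    pending = [pathname]
    root_dev = os.lstat(pathname).st_dev
    while pending:
        current = pending.pop()
        for name in os.listdir(current):
            full = os.path.join(current, name)
            try:
                st = os.lstat(full)
            except OSError:
                continue  # entry disappeared while scanning; ignore it
            if st.st_dev != root_dev:
                continue  # skip data on a different partition, as run_du() does
            if stat.S_ISDIR(st.st_mode):
                if name in exclude:
                    continue
                pending.append(full)
                total += st.st_size
            elif stat.S_ISREG(st.st_mode):
                total += st.st_size
    return total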
# -*- coding: utf-8 -*-

# Copyright 2014, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
Amazon Web Services provider for stackd.io
"""

import os
import re
import stat
import logging
from uuid import uuid4
from time import sleep

import requests
from requests.exceptions import ConnectionError

import boto
import boto.ec2
import boto.vpc
import yaml
from boto.route53.record import ResourceRecordSets
from boto.exception import EC2ResponseError

from cloud.providers.base import (
    BaseCloudProvider,
    TimeoutException,
    MaxFailuresException,
)
from core.exceptions import BadRequest, InternalServerError

GROUP_PATTERN = re.compile('\d+:[a-zA-Z0-9-_]')
CIDR_PATTERN = re.compile('[0-9]+(?:\.[0-9]+){3}\/\d{1,2}')

# Boto Errors
BOTO_DUPLICATE_ERROR_CODE = 'InvalidPermission.Duplicate'

logger = logging.getLogger(__name__)

DEFAULT_ROUTE53_TTL = 30


class Route53Domain(object):
    def __init__(self, access_key, secret_key, domain):
        '''
        `access_key`
            The AWS access key id
        `secret_key`
            The AWS secret access key
        `domain`
            An existing Route53 domain to manage
        '''
        self.access_key = access_key
        self.secret_key = secret_key
        self.domain = domain

        # Loaded after connection is made
        self.hosted_zone = None
        self.zone_id = None
        self.rr_sets = None

        self.conn = boto.connect_route53(self.access_key, self.secret_key)
        self._load_domain()

    def _load_domain(self):
        '''
        Attempts to look up a Route53 hosted zone given a domain name
        (they're much easier to remember than a zone id). Once loaded,
        pulls out the zone id and stores it in `zone_id`
        '''
        # look up the hosted zone based on the domain
        response = self.conn.get_hosted_zone_by_name(self.domain)
        self.hosted_zone = response['GetHostedZoneResponse']['HostedZone']

        # Get the zone id, but strip off the first part /hostedzone/
        self.zone_id = self.hosted_zone['Id'][len('/hostedzone/'):]

    def get_rrnames_set(self, force=False):
        '''
        Returns a cached set of resource record names for our zone id,
        and builds the cached set if we haven't already.
        '''
        if not force and self.rr_sets is not None:
            return self.rr_sets

        self.rr_sets = {}
        for rr in self.conn.get_all_rrsets(self.zone_id):
            self.rr_sets[rr.name] = {
                'type': rr.type,
                'ttl': rr.ttl,
                'value': rr.to_print(),
            }
        return self.rr_sets

    def start_rr_transaction(self):
        '''
        Creates a new Route53 ResourceRecordSets object that is used
        internally like a transaction of sorts. You may add or delete
        many resource records using a single set by calling the
        `add_record` and `delete_record` methods. Finish the transaction
        with `finish_rr_transaction`

        NOTE: Calling this method again before finishing will not finish
        an existing transaction or delete it. To cancel an existing
        transaction use the `cancel_rr_transaction`.
''' if not hasattr(self, '_rr_txn') or self._rr_txn is None: # Return a new ResourceRecordSets "transaction" self._rr_txn = ResourceRecordSets(self.conn, self.zone_id) def finish_rr_transaction(self): ''' If a transaction exists, commit the changes to Route53 ''' if self._rr_txn is not None: self._rr_txn.commit() self._rr_txn = None def cancel_rr_transaction(self): ''' Basically deletes the existing transaction. ''' self._rr_txn = None def add_record(self, record_name, record_value, record_type, ttl=DEFAULT_ROUTE53_TTL): ''' NOTE: This method must be called after `start_rr_transaction`. Adds a new record to the existing resource record transaction. `record_name` The subdomain part of the record (e.g., web-1 for a domain like web-1.dev.example.com) `record_value` The host or IP the record will point to. `record_type` The type of the record (CNAME or A) `ttl` The TTL for the record in seconds, default is 30 seconds ''' # Update the record name to be fully qualified with the domain # for this instance. The period on the end is required. record_name += '.{0}.'.format(self.domain) # Check for an existing record and remove it before # updating it rr_names = self.get_rrnames_set() if record_name in rr_names: self._delete_rr_record(record_name, [rr_names[record_name]['value']], rr_names[record_name]['type'], ttl=rr_names[record_name]['ttl']) # self._delete_rr_record(record_name, # [record_value], # record_type, # ttl=ttl) self._add_rr_record(record_name, [record_value], record_type, ttl=ttl) def delete_record(self, record_name, record_value, record_type, ttl=DEFAULT_ROUTE53_TTL): ''' Almost the same as `add_record` but it deletes an existing record NOTE: The name, value, and ttl must all match an existing record or Route53 will not allow it to be removed. ''' # Update the record name to be fully qualified with the domain # for this instance. The period on the end is required. 
record_name += '.{0}.'.format(self.domain) # Only remove the record if it exists rr_names = self.get_rrnames_set() if record_name in rr_names: self._delete_rr_record(record_name, [rr_names[record_name]['value']], rr_names[record_name]['type'], ttl=rr_names[record_name]['ttl']) # self._delete_rr_record(record_name, # [record_value], # record_type, # ttl=ttl) return True return False def _add_rr_record(self, record_name, record_values, record_type, **kwargs): rr = self._rr_txn.add_change('CREATE', record_name, record_type, **kwargs) for v in record_values: rr.add_value(v) def _delete_rr_record(self, record_name, record_values, record_type, **kwargs): rr = self._rr_txn.add_change('DELETE', record_name, record_type, **kwargs) for v in record_values: rr.add_value(v) class AWSCloudProvider(BaseCloudProvider): SHORT_NAME = 'ec2' LONG_NAME = 'Amazon Web Services' # The account/owner id ACCOUNT_ID = 'account_id' # The AWS access key id ACCESS_KEY = 'access_key_id' # The AWS secret access key SECRET_KEY = 'secret_access_key' # The AWS keypair name KEYPAIR = 'keypair' # The AWS security groups # SECURITY_GROUPS = 'security_groups' # The path to the private key for SSH PRIVATE_KEY = 'private_key' # VPC fields VPC_ID = 'vpc_id' # VPC_SUBNETS = 'vpc_subnets' # The route53 zone to use for managing DNS ROUTE53_DOMAIN = 'route53_domain' REGION = 'region' STATE_STOPPED = 'stopped' STATE_RUNNING = 'running' STATE_SHUTTING_DOWN = 'shutting-down' STATE_TERMINATED = 'terminated' @classmethod def get_required_fields(self): return [ self.ACCOUNT_ID, self.ACCESS_KEY, self.SECRET_KEY, self.KEYPAIR, self.PRIVATE_KEY, self.ROUTE53_DOMAIN, # self.SECURITY_GROUPS ] @classmethod def get_available_actions(self): return [ self.ACTION_STOP, self.ACTION_START, self.ACTION_TERMINATE, self.ACTION_LAUNCH, self.ACTION_PROVISION, self.ACTION_ORCHESTRATE, self.ACTION_CUSTOM, ] def get_private_key_path(self): return os.path.join(self.provider_storage, 'id_rsa') def get_config_file_path(self): return os.path.join(self.provider_storage, 'config') def get_config(self): with open(self.get_config_file_path(), 'r') as f: config_data = yaml.safe_load(f) return config_data def get_credentials(self): config_data = self.get_config() return config_data['location'], config_data['id'], config_data['key'] def get_provider_data(self, data, files=None): # write the private key to the proper location private_key_path = self.get_private_key_path() with open(private_key_path, 'w') as f: f.write(data[self.PRIVATE_KEY]) # change the file permissions of the RSA key os.chmod(private_key_path, stat.S_IRUSR) config_data = { 'provider': self.SHORT_NAME, 'id': data[self.ACCESS_KEY], 'key': data[self.SECRET_KEY], 'keyname': data[self.KEYPAIR], 'private_key': private_key_path, 'append_domain': data[self.ROUTE53_DOMAIN], 'location': data[self.REGION], 'ssh_connect_timeout': 300, 'wait_for_passwd_timeout': 5, 'rename_on_destroy': True, 'delvol_on_destroy': True, } r = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document') master_region = r.json()['region'] if master_region == data[self.REGION]: config_data['ssh_interface'] = 'private_ips' else: config_data['ssh_interface'] = 'public_ips' # Save the data out to a file that can be reused by this provider # later if necessary with open(self.get_config_file_path(), 'w') as f: f.write(yaml.safe_dump(config_data, default_flow_style=False)) return config_data # TODO: Ignoring code complexity issues... 
def validate_provider_data(self, data, files=None): # NOQA errors = super(AWSCloudProvider, self) \ .validate_provider_data(data, files) if errors: return errors # check authentication credentials try: ec2 = boto.ec2.connect_to_region( data[self.REGION], aws_access_key_id=data[self.ACCESS_KEY], aws_secret_access_key=data[self.SECRET_KEY]) ec2.get_all_zones() except boto.exception.EC2ResponseError, e: err_msg = 'Unable to authenticate to AWS with the provided keys.' errors.setdefault(self.ACCESS_KEY, []).append(err_msg) errors.setdefault(self.SECRET_KEY, []).append(err_msg) if errors: return errors # check keypair try: ec2.get_all_key_pairs(data[self.KEYPAIR]) except boto.exception.EC2ResponseError, e: errors.setdefault(self.KEYPAIR, []).append( 'The keypair \'{0}\' does not exist in this account.' ''.format(data[self.KEYPAIR]) ) # check route 53 domain try: if self.ROUTE53_DOMAIN in data: # connect to route53 and check that the domain is available r53 = boto.connect_route53(data[self.ACCESS_KEY], data[self.SECRET_KEY]) found_domain = False domain = data[self.ROUTE53_DOMAIN] hosted_zones = r53.get_all_hosted_zones() hosted_zones = \ hosted_zones['ListHostedZonesResponse']['HostedZones'] for hosted_zone in hosted_zones: if hosted_zone['Name'].startswith(domain): found_domain = True break if not found_domain: err = 'The Route53 domain \'{0}\' does not exist in ' \ 'this account.'.format(domain) errors.setdefault(self.ROUTE53_DOMAIN, []).append(err) # except boto.exception.DNSServerError, e: except Exception, e: logger.exception('Route53 issue?') errors.setdefault(self.ROUTE53_DOMAIN, []).append(str(e)) # check VPC required fields if self.VPC_ID in data and data[self.VPC_ID]: vpc_id = data[self.VPC_ID] try: vpc = boto.vpc.connect_to_region( data[self.REGION], aws_access_key_id=data[self.ACCESS_KEY], aws_secret_access_key=data[self.SECRET_KEY]) except boto.exception.EC2ResponseError, e: err_msg = ('Unable to authenticate to AWS VPC with the ' 'provided keys.') errors.setdefault(self.ACCESS_KEY, []).append(err_msg) errors.setdefault(self.SECRET_KEY, []).append(err_msg) if not errors: try: vpc.get_all_vpcs([vpc_id]) except boto.exception.EC2ResponseError, e: errors.setdefault(self.VPC_ID, []).append( 'The VPC \'{0}\' does not exist in this account.' .format(vpc_id) ) return errors def validate_image_id(self, image_id): ec2 = self.connect_ec2() try: ec2.get_all_images(image_ids=[image_id]) return True, '' except boto.exception.EC2ResponseError, e: return False, e.error_message def connect_route53(self): # Load the configuration file to get a few things we'll need # to manage DNS config_data = self.get_config() access_key = config_data['id'] secret_key = config_data['key'] domain = config_data['append_domain'] # load a new Route53Domain class and return it return Route53Domain(access_key, secret_key, domain) def connect_ec2(self): if not hasattr(self, '_ec2_connection'): region, access_key, secret_key = self.get_credentials() self._ec2_connection = boto.ec2.connect_to_region( region, aws_access_key_id=access_key, aws_secret_access_key=secret_key) return self._ec2_connection def connect_vpc(self): if not hasattr(self, '_vpc_connection'): region, access_key, secret_key = self.get_credentials() self._vpc_connection = boto.vpc.connect_to_region( region, aws_access_key_id=access_key, aws_secret_access_key=secret_key) return self._vpc_connection
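# --- Editor's sketch (not part of the original module): the Route53Domain
# docstrings above describe a transaction-style flow -- open a ResourceRecordSets
# "transaction", queue record changes, then commit. The function name, the
# credentials, the domain and the host/IP values below are placeholders.
def register_host_example(access_key, secret_key):
    dns = Route53Domain(access_key, secret_key, 'dev.example.com')
    dns.start_rr_transaction()
    # add_record() queues a CREATE change and first removes any existing
    # record with the same fully qualified name
    dns.add_record('web-1', '10.0.0.12', 'A', ttl=DEFAULT_ROUTE53_TTL)
    dns.finish_rr_transaction()  # commits the queued changes to Route53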
from itertools import chain from typing import Optional, Iterable, Set, Union, TYPE_CHECKING import logging import pyvex import claripy from ...storage.memory_mixins.paged_memory.pages.multi_values import MultiValues from ...engines.light import SimEngineLight, SimEngineLightVEXMixin, SpOffset from ...engines.vex.claripy.datalayer import value as claripy_value from ...engines.vex.claripy.irop import operations as vex_operations from ...errors import SimEngineError, SimMemoryMissingError from ...calling_conventions import DEFAULT_CC, SimRegArg, SimStackArg, SimCC from ...utils.constants import DEFAULT_STATEMENT from ...knowledge_plugins.key_definitions.definition import Definition from ...knowledge_plugins.key_definitions.tag import LocalVariableTag, ParameterTag, ReturnValueTag, Tag from ...knowledge_plugins.key_definitions.atoms import Atom, Register, MemoryLocation, Tmp from ...knowledge_plugins.key_definitions.constants import OP_BEFORE, OP_AFTER from ...knowledge_plugins.key_definitions.heap_address import HeapAddress from ...knowledge_plugins.key_definitions.undefined import Undefined from ...code_location import CodeLocation from .rd_state import ReachingDefinitionsState from .external_codeloc import ExternalCodeLocation if TYPE_CHECKING: from ...knowledge_plugins import FunctionManager l = logging.getLogger(name=__name__) class SimEngineRDVEX( SimEngineLightVEXMixin, SimEngineLight, ): # pylint:disable=abstract-method """ Implements the VEX execution engine for reaching definition analysis. """ def __init__(self, project, call_stack, maximum_local_call_depth, functions=None, function_handler=None): super().__init__() self.project = project self._call_stack = call_stack self._maximum_local_call_depth = maximum_local_call_depth self.functions: Optional['FunctionManager'] = functions self._function_handler = function_handler self._visited_blocks = None self._dep_graph = None self.state: ReachingDefinitionsState def process(self, state, *args, **kwargs): self._dep_graph = kwargs.pop('dep_graph', None) self._visited_blocks = kwargs.pop('visited_blocks', None) # we are using a completely different state. Therefore, we directly call our _process() method before # SimEngine becomes flexible enough. 
try: self._process( state, None, block=kwargs.pop('block', None), ) except SimEngineError as e: if kwargs.pop('fail_fast', False) is True: raise e l.error(e) return self.state, self._visited_blocks, self._dep_graph def _process_block_end(self): self.stmt_idx = DEFAULT_STATEMENT if self.block.vex.jumpkind == "Ijk_Call": # it has to be a function addr = self._expr(self.block.vex.next) self._handle_function(addr) elif self.block.vex.jumpkind == "Ijk_Boring": # test if the target addr is a function or not addr = self._expr(self.block.vex.next) addr_v = addr.one_value() if addr_v is not None and addr_v.concrete: addr_int = addr_v._model_concrete.value if addr_int in self.functions: # yes it's a jump to a function self._handle_function(addr) # # Private methods # @staticmethod def _external_codeloc(): return ExternalCodeLocation() # # VEX statement handlers # def _handle_Stmt(self, stmt): if self.state.analysis: self.state.analysis.insn_observe(self.ins_addr, stmt, self.block, self.state, OP_BEFORE) super()._handle_Stmt(stmt) if self.state.analysis: self.state.analysis.insn_observe(self.ins_addr, stmt, self.block, self.state, OP_AFTER) def _handle_WrTmp(self, stmt: pyvex.IRStmt.WrTmp): data: MultiValues = self._expr(stmt.data) tmp_atom = Tmp(stmt.tmp, self.tyenv.sizeof(stmt.tmp) // self.arch.byte_width) # if len(data.values) == 1 and 0 in data.values: # data_v = data.one_value() # if data_v is not None: # # annotate data with its definition # data = MultiValues(offset_to_values={ # 0: {self.state.annotate_with_def(data_v, Definition(tmp_atom, self._codeloc())) # } # }) self.tmps[stmt.tmp] = data self.state.kill_and_add_definition(tmp_atom, self._codeloc(), data, ) def _handle_WrTmpData(self, tmp: int, data): super()._handle_WrTmpData(tmp, data) self.state.kill_and_add_definition(Tmp(tmp, self.tyenv.sizeof(tmp)), self._codeloc(), self.tmps[tmp]) # e.g. PUT(rsp) = t2, t2 might include multiple values def _handle_Put(self, stmt): reg_offset: int = stmt.offset size: int = stmt.data.result_size(self.tyenv) // 8 reg = Register(reg_offset, size) data = self._expr(stmt.data) # special handling for references to heap or stack variables if len(data.values) == 1: for d in next(iter(data.values.values())): if self.state.is_heap_address(d): heap_offset = self.state.get_heap_offset(d) if heap_offset is not None: self.state.add_use(MemoryLocation(HeapAddress(heap_offset), 1), self._codeloc()) elif self.state.is_stack_address(d): stack_offset = self.state.get_stack_offset(d) if stack_offset is not None: self.state.add_use(MemoryLocation(SpOffset(self.arch.bits, stack_offset), 1), self._codeloc()) self.state.kill_and_add_definition(reg, self._codeloc(), data) # e.g. 
STle(t6) = t21, t6 and/or t21 might include multiple values def _handle_Store(self, stmt): addr = self._expr(stmt.addr) size = stmt.data.result_size(self.tyenv) // 8 data = self._expr(stmt.data) if len(addr.values) == 1: addrs = next(iter(addr.values.values())) self._store_core(addrs, size, data, endness=stmt.endness) def _handle_StoreG(self, stmt: pyvex.IRStmt.StoreG): guard = self._expr(stmt.guard) guard_v = guard.one_value() if claripy.is_true(guard_v): addr = self._expr(stmt.addr) if len(addr.values) == 1: addrs = next(iter(addr.values.values())) size = stmt.data.result_size(self.tyenv) // 8 data = self._expr(stmt.data) self._store_core(addrs, size, data) elif claripy.is_false(guard_v): pass else: # guard.data == {True, False} # get current data addr = self._expr(stmt.addr) if len(addr.values) == 1: addrs = next(iter(addr.values.values())) size = stmt.data.result_size(self.tyenv) // 8 data_old = self._load_core(addrs, size, stmt.endness) data = self._expr(stmt.data) self._store_core(addrs, size, data, data_old=data_old) def _store_core(self, addr: Iterable[Union[int,HeapAddress,SpOffset]], size: int, data: MultiValues, data_old: Optional[MultiValues]=None, endness=None): if data_old is not None: data = data.merge(data_old) for a in addr: if self.state.is_top(a): l.debug('Memory address undefined, ins_addr = %#x.', self.ins_addr) else: tags: Optional[Set[Tag]] if isinstance(a, int): atom = MemoryLocation(a, size) tags = None elif self.state.is_stack_address(a): atom = MemoryLocation(SpOffset(self.arch.bits, self.state.get_stack_offset(a)), size) function_address = ( self.project.kb .cfgs.get_most_accurate() .get_all_nodes(self._codeloc().ins_addr, anyaddr=True)[0] .function_address ) tags = {LocalVariableTag( function=function_address, metadata={'tagged_by': 'SimEngineRDVEX._store_core'} )} elif self.state.is_heap_address(a): atom = MemoryLocation(HeapAddress(self.state.get_heap_offset(a)), size) tags = None else: continue # different addresses are not killed by a subsequent iteration, because kill only removes entries # with same index and same size self.state.kill_and_add_definition(atom, self._codeloc(), data, tags=tags, endness=endness) def _handle_LoadG(self, stmt): guard = self._expr(stmt.guard) guard_v = guard.one_value() if claripy.is_true(guard_v): # FIXME: full conversion support if stmt.cvt.find('Ident') < 0: l.warning('Unsupported conversion %s in LoadG.', stmt.cvt) load_expr = pyvex.expr.Load(stmt.end, stmt.cvt_types[1], stmt.addr) wr_tmp_stmt = pyvex.stmt.WrTmp(stmt.dst, load_expr) self._handle_WrTmp(wr_tmp_stmt) elif claripy.is_false(guard_v): wr_tmp_stmt = pyvex.stmt.WrTmp(stmt.dst, stmt.alt) self._handle_WrTmp(wr_tmp_stmt) else: if stmt.cvt.find('Ident') < 0: l.warning('Unsupported conversion %s in LoadG.', stmt.cvt) load_expr = pyvex.expr.Load(stmt.end, stmt.cvt_types[1], stmt.addr) load_expr_v = self._expr(load_expr) alt_v = self._expr(stmt.alt) data = load_expr_v.merge(alt_v) self._handle_WrTmpData(stmt.dst, data) def _handle_Exit(self, stmt): _ = self._expr(stmt.guard) target = stmt.dst.value self.state.mark_guard(self._codeloc(), target) def _handle_IMark(self, stmt): pass def _handle_AbiHint(self, stmt): pass def _handle_LLSC(self, stmt: pyvex.IRStmt.LLSC): if stmt.storedata is None: # load-link addr = self._expr(stmt.addr) if len(addr.values) == 1: addrs = next(iter(addr.values.values())) size = self.tyenv.sizeof(stmt.result) // self.arch.byte_width load_result = self._load_core(addrs, size, stmt.endness) self.tmps[stmt.result] = load_result 
self.state.kill_and_add_definition(Tmp(stmt.result, self.tyenv.sizeof(stmt.result) // self.arch.byte_width), self._codeloc(), load_result) else: # store-conditional storedata = self._expr(stmt.storedata) addr = self._expr(stmt.addr) if len(addr.values) == 1: addrs = next(iter(addr.values.values())) size = self.tyenv.sizeof(stmt.storedata.tmp) // self.arch.byte_width self._store_core(addrs, size, storedata) self.tmps[stmt.result] = MultiValues(offset_to_values={0: {claripy.BVV(1, 1)}}) self.state.kill_and_add_definition(Tmp(stmt.result, self.tyenv.sizeof(stmt.result) // self.arch.byte_width), self._codeloc(), self.tmps[stmt.result]) # # VEX expression handlers # def _expr(self, expr) -> MultiValues: data = super()._expr(expr) if data is None: bits = expr.result_size(self.tyenv) top = self.state.top(bits) data = MultiValues(offset_to_values={0: {top}}) return data def _handle_RdTmp(self, expr: pyvex.IRExpr.RdTmp) -> Optional[MultiValues]: tmp: int = expr.tmp self.state.add_use(Tmp(tmp, expr.result_size(self.tyenv) // self.arch.byte_width), self._codeloc()) if tmp in self.tmps: return self.tmps[tmp] return None # e.g. t0 = GET:I64(rsp), rsp might be defined multiple times def _handle_Get(self, expr: pyvex.IRExpr.Get) -> MultiValues: reg_offset: int = expr.offset bits: int = expr.result_size(self.tyenv) size: int = bits // self.arch.byte_width reg_atom = Register(reg_offset, size) try: values: MultiValues = self.state.register_definitions.load(reg_offset, size=size) except SimMemoryMissingError: top = self.state.top(size * self.arch.byte_width) # annotate it top = self.state.annotate_with_def(top, Definition(reg_atom, ExternalCodeLocation())) values = MultiValues({0: {top}}) # write it to registers self.state.kill_and_add_definition(reg_atom, self._external_codeloc(), values) current_defs: Optional[Iterable[Definition]] = None for vs in values.values.values(): for v in vs: if current_defs is None: current_defs = self.state.extract_defs(v) else: current_defs = chain(current_defs, self.state.extract_defs(v)) if current_defs is None: # no defs can be found. add a fake definition self.state.kill_and_add_definition(reg_atom, self._external_codeloc(), values) self.state.add_use(reg_atom, self._codeloc()) return values # e.g. 
t27 = LDle:I64(t9), t9 might include multiple values # caution: Is also called from StoreG def _handle_Load(self, expr) -> MultiValues: addr = self._expr(expr.addr) bits = expr.result_size(self.tyenv) size = bits // self.arch.byte_width # convert addr from MultiValues to a list of valid addresses if len(addr.values) == 1: addrs = next(iter(addr.values.values())) return self._load_core(addrs, size, expr.endness) top = self.state.top(bits) # annotate it dummy_atom = MemoryLocation(0, size) top = self.state.annotate_with_def(top, Definition(dummy_atom, ExternalCodeLocation())) # add use self.state.add_use(dummy_atom, self._codeloc()) return MultiValues(offset_to_values={0: {top}}) def _load_core(self, addrs: Iterable[claripy.ast.Base], size: int, endness: str) -> MultiValues: result: Optional[MultiValues] = None for addr in addrs: if self.state.is_top(addr): l.debug('Memory address undefined, ins_addr = %#x.', self.ins_addr) elif self.state.is_stack_address(addr): # Load data from a local variable stack_offset = self.state.get_stack_offset(addr) if stack_offset is not None: stack_addr = self.state.live_definitions.stack_offset_to_stack_addr(stack_offset) try: vs: MultiValues = self.state.stack_definitions.load(stack_addr, size=size, endness=endness) except SimMemoryMissingError: continue memory_location = MemoryLocation(SpOffset(self.arch.bits, stack_offset), size, endness=endness) self.state.add_use(memory_location, self._codeloc()) result = result.merge(vs) if result is not None else vs elif self.state.is_heap_address(addr): # Load data from the heap heap_offset = self.state.get_heap_offset(addr) vs: MultiValues = self.state.heap_definitions.load(heap_offset, size=size, endness=endness) memory_location = MemoryLocation(HeapAddress(heap_offset), size, endness=endness) self.state.add_use(memory_location, self._codeloc()) result = result.merge(vs) if result is not None else vs else: addr_v = addr._model_concrete.value # Load data from a global region try: vs: MultiValues = self.state.memory_definitions.load(addr_v, size=size, endness=endness) except SimMemoryMissingError: # try to load it from the static memory backer # TODO: Is this still required? try: vs = MultiValues(offset_to_values={0: { claripy.BVV( self.project.loader.memory.unpack_word(addr_v, size=size), size * self.arch.byte_width )}}) except KeyError: continue result = result.merge(vs) if result is not None else vs # FIXME: _add_memory_use() iterates over the same loop memory_location = MemoryLocation(addr_v, size, endness=endness) self.state.add_use(memory_location, self._codeloc()) if result is None: result = MultiValues(offset_to_values={0: {self.state.top(size * self.arch.byte_width)}}) return result # CAUTION: experimental def _handle_ITE(self, expr: pyvex.IRExpr.ITE): cond = self._expr(expr.cond) cond_v = cond.one_value() iftrue = self._expr(expr.iftrue) iffalse = self._expr(expr.iffalse) if claripy.is_true(cond_v): return
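# ---------------------------------------------------------------------------
# Illustrative, standalone sketch (not the angr MultiValues API): the load and
# store handlers above keep merging offset -> {candidate values} maps; the
# semantics they rely on are a per-offset set union, and one_value() only
# yields something when a single candidate remains. Names below
# (SimpleMultiValues) are placeholders for illustration only.
# ---------------------------------------------------------------------------
class SimpleMultiValues:
    def __init__(self, offset_to_values=None):
        # offset -> set of candidate values observed at that offset
        self.values = dict(offset_to_values or {})

    def merge(self, other):
        # union the candidate sets offset by offset, keeping offsets that
        # only one side knows about
        merged = {off: set(vals) for off, vals in self.values.items()}
        for off, vals in other.values.items():
            merged.setdefault(off, set()).update(vals)
        return SimpleMultiValues(merged)

    def one_value(self):
        # a definite value exists only when there is exactly one offset
        # holding exactly one candidate
        if len(self.values) == 1:
            (vals,) = self.values.values()
            if len(vals) == 1:
                return next(iter(vals))
        return None


# Example: two possible loads collapse into one multi-valued result, so
# one_value() is None and callers fall back to a TOP value, as above.
_a = SimpleMultiValues({0: {0x1000}})
_b = SimpleMultiValues({0: {0x2000}})
assert _a.merge(_b).values == {0: {0x1000, 0x2000}}
assert _a.merge(_b).one_value() is None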
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'InstanceMaintenancePolicy', 'InstanceMaintenancePolicyWeeklyMaintenanceWindow', 'InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime', 'InstanceMaintenanceSchedule', 'InstanceNode', 'InstanceServerCaCert', 'GetInstanceMaintenancePolicyResult', 'GetInstanceMaintenancePolicyWeeklyMaintenanceWindowResult', 'GetInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeResult', 'GetInstanceMaintenanceScheduleResult', 'GetInstanceNodeResult', 'GetInstanceServerCaCertResult', ] @pulumi.output_type class InstanceMaintenancePolicy(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "createTime": suggest = "create_time" elif key == "updateTime": suggest = "update_time" elif key == "weeklyMaintenanceWindows": suggest = "weekly_maintenance_windows" if suggest: pulumi.log.warn(f"Key '{key}' not found in InstanceMaintenancePolicy. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: InstanceMaintenancePolicy.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: InstanceMaintenancePolicy.__key_warning(key) return super().get(key, default) def __init__(__self__, *, create_time: Optional[str] = None, description: Optional[str] = None, update_time: Optional[str] = None, weekly_maintenance_windows: Optional[Sequence['outputs.InstanceMaintenancePolicyWeeklyMaintenanceWindow']] = None): """ :param str create_time: - Output only. The time when the policy was created. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. :param str description: Optional. Description of what this policy is for. Create/Update methods return INVALID_ARGUMENT if the length is greater than 512. :param str update_time: - Output only. The time when the policy was last updated. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. :param Sequence['InstanceMaintenancePolicyWeeklyMaintenanceWindowArgs'] weekly_maintenance_windows: Optional. Maintenance window that is applied to resources covered by this policy. Minimum 1. For the current version, the maximum number of weekly_window is expected to be one. Structure is documented below. """ if create_time is not None: pulumi.set(__self__, "create_time", create_time) if description is not None: pulumi.set(__self__, "description", description) if update_time is not None: pulumi.set(__self__, "update_time", update_time) if weekly_maintenance_windows is not None: pulumi.set(__self__, "weekly_maintenance_windows", weekly_maintenance_windows) @property @pulumi.getter(name="createTime") def create_time(self) -> Optional[str]: """ - Output only. The time when the policy was created. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. """ return pulumi.get(self, "create_time") @property @pulumi.getter def description(self) -> Optional[str]: """ Optional. Description of what this policy is for. Create/Update methods return INVALID_ARGUMENT if the length is greater than 512. 
""" return pulumi.get(self, "description") @property @pulumi.getter(name="updateTime") def update_time(self) -> Optional[str]: """ - Output only. The time when the policy was last updated. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. """ return pulumi.get(self, "update_time") @property @pulumi.getter(name="weeklyMaintenanceWindows") def weekly_maintenance_windows(self) -> Optional[Sequence['outputs.InstanceMaintenancePolicyWeeklyMaintenanceWindow']]: """ Optional. Maintenance window that is applied to resources covered by this policy. Minimum 1. For the current version, the maximum number of weekly_window is expected to be one. Structure is documented below. """ return pulumi.get(self, "weekly_maintenance_windows") @pulumi.output_type class InstanceMaintenancePolicyWeeklyMaintenanceWindow(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "startTime": suggest = "start_time" if suggest: pulumi.log.warn(f"Key '{key}' not found in InstanceMaintenancePolicyWeeklyMaintenanceWindow. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: InstanceMaintenancePolicyWeeklyMaintenanceWindow.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: InstanceMaintenancePolicyWeeklyMaintenanceWindow.__key_warning(key) return super().get(key, default) def __init__(__self__, *, day: str, start_time: 'outputs.InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime', duration: Optional[str] = None): """ :param str day: Required. The day of week that maintenance updates occur. - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. - MONDAY: Monday - TUESDAY: Tuesday - WEDNESDAY: Wednesday - THURSDAY: Thursday - FRIDAY: Friday - SATURDAY: Saturday - SUNDAY: Sunday Possible values are `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, and `SUNDAY`. :param 'InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs' start_time: - Output only. The start time of any upcoming scheduled maintenance for this instance. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. :param str duration: - Output only. Duration of the maintenance window. The current window is fixed at 1 hour. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". """ pulumi.set(__self__, "day", day) pulumi.set(__self__, "start_time", start_time) if duration is not None: pulumi.set(__self__, "duration", duration) @property @pulumi.getter def day(self) -> str: """ Required. The day of week that maintenance updates occur. - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. - MONDAY: Monday - TUESDAY: Tuesday - WEDNESDAY: Wednesday - THURSDAY: Thursday - FRIDAY: Friday - SATURDAY: Saturday - SUNDAY: Sunday Possible values are `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, and `SUNDAY`. """ return pulumi.get(self, "day") @property @pulumi.getter(name="startTime") def start_time(self) -> 'outputs.InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime': """ - Output only. The start time of any upcoming scheduled maintenance for this instance. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. """ return pulumi.get(self, "start_time") @property @pulumi.getter def duration(self) -> Optional[str]: """ - Output only. 
Duration of the maintenance window. The current window is fixed at 1 hour. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". """ return pulumi.get(self, "duration") @pulumi.output_type class InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(dict): def __init__(__self__, *, hours: Optional[int] = None, minutes: Optional[int] = None, nanos: Optional[int] = None, seconds: Optional[int] = None): """ :param int hours: Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. :param int minutes: Minutes of hour of day. Must be from 0 to 59. :param int nanos: Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. :param int seconds: Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. """ if hours is not None: pulumi.set(__self__, "hours", hours) if minutes is not None: pulumi.set(__self__, "minutes", minutes) if nanos is not None: pulumi.set(__self__, "nanos", nanos) if seconds is not None: pulumi.set(__self__, "seconds", seconds) @property @pulumi.getter def hours(self) -> Optional[int]: """ Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. """ return pulumi.get(self, "hours") @property @pulumi.getter def minutes(self) -> Optional[int]: """ Minutes of hour of day. Must be from 0 to 59. """ return pulumi.get(self, "minutes") @property @pulumi.getter def nanos(self) -> Optional[int]: """ Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. """ return pulumi.get(self, "nanos") @property @pulumi.getter def seconds(self) -> Optional[int]: """ Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. """ return pulumi.get(self, "seconds") @pulumi.output_type class InstanceMaintenanceSchedule(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "endTime": suggest = "end_time" elif key == "scheduleDeadlineTime": suggest = "schedule_deadline_time" elif key == "startTime": suggest = "start_time" if suggest: pulumi.log.warn(f"Key '{key}' not found in InstanceMaintenanceSchedule. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: InstanceMaintenanceSchedule.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: InstanceMaintenanceSchedule.__key_warning(key) return super().get(key, default) def __init__(__self__, *, end_time: Optional[str] = None, schedule_deadline_time: Optional[str] = None, start_time: Optional[str] = None): """ :param str end_time: - Output only. The end time of any upcoming scheduled maintenance for this instance. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. :param str schedule_deadline_time: - Output only. The deadline that the maintenance schedule start time can not go beyond, including reschedule. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. :param str start_time: - Output only. The start time of any upcoming scheduled maintenance for this instance. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
""" if end_time is not None: pulumi.set(__self__, "end_time", end_time) if schedule_deadline_time is not None: pulumi.set(__self__, "schedule_deadline_time", schedule_deadline_time) if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter(name="endTime") def end_time(self) -> Optional[str]: """ - Output only. The end time of any upcoming
true if t is an anagram of s, and false otherwise. An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase, typically using all the original letters exactly once. Example 1: Input: s = "anagram", t = "nagaram" Output: true Example 2: Input: s = "rat", t = "car" Output: false Constraints: 1 <= s.length, t.length <= 5 * 104 s and t consist of lowercase English letters. """ def isAnagram(s: str, t: str) -> bool: pass """ Given an array of strings strs, group the anagrams together. You can return the answer in any order. An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase, typically using all the original letters exactly once. Example 1: Input: strs = ["eat","tea","tan","ate","nat","bat"] Output: [["bat"],["nat","tan"],["ate","eat","tea"]] Example 2: Input: strs = [""] Output: [[""]] Example 3: Input: strs = ["a"] Output: [["a"]] """ def groupAnagrams(strs: List[str]) -> List[List[str]]: pass """A phrase is a palindrome if, after converting all uppercase letters into lowercase letters and removing all non-alphanumeric characters, it reads the same forward and backward. Alphanumeric characters include letters and numbers. Given a string s, return true if it is a palindrome, or false otherwise. Example 1: Input: s = "A man, a plan, a canal: Panama" Output: true Explanation: "amanaplanacanalpanama" is a palindrome. Example 2: Input: s = "race a car" Output: false Explanation: "raceacar" is not a palindrome. Example 3: Input: s = " " Output: true Explanation: s is an empty string "" after removing non-alphanumeric characters. Since an empty string reads the same forward and backward, it is a palindrome. """ def isPalindrome(s: str) -> bool: pass """ Given a string s containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid. An input string is valid if: Open brackets must be closed by the same type of brackets. Open brackets must be closed in the correct order. Example 1: Input: s = "()" Output: true Example 2: Input: s = "()[]{}" Output: true Example 3: Input: s = "(]" Output: false """ def isValid(s: str) -> bool: pass """ Given a string s, return the longest palindromic substring in s. Example 1: Input: s = "babad" Output: "bab" Explanation: "aba" is also a valid answer. Example 2: Input: s = "cbbd" Output: "bb" """ def longestPalindrome(s: str) -> str: pass """ Given a string s, return the number of palindromic substrings in it. A string is a palindrome when it reads the same backward as forward. A substring is a contiguous sequence of characters within the string. Example 1: Input: s = "abc" Output: 3 Explanation: Three palindromic strings: "a", "b", "c". Example 2: Input: s = "aaa" Output: 6 Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa". """ def countSubstrings(s: str) -> int: pass """Design an algorithm to encode a list of strings to a string. The encoded string is then sent over the network and is decoded back to the original list of strings. Machine 1 (sender) has the function: string encode(vector<string> strs) { // ... your code return encoded_string; } Machine 2 (receiver) has the function: vector<string> decode(string s) { //... your code return strs; } So Machine 1 does: string encoded_string = encode(strs); and Machine 2 does: vector<string> strs2 = decode(encoded_string); strs2 in Machine 2 should be the same as strs in Machine 1. Implement the encode and decode methods. 
You are not allowed to solve the problem using any serialize methods (such as eval). Example 1: Input: dummy_input = ["Hello","World"] Output: ["Hello","World"] Explanation: Machine 1: Codec encoder = new Codec(); String msg = encoder.encode(strs); Machine 1 ---msg---> Machine 2 Machine 2: Codec decoder = new Codec(); String[] strs = decoder.decode(msg); Example 2: Input: dummy_input = [""] Output: [""] """ class Codec: def encode(strs: [str]) -> str: """Encodes a list of strings to a single string. """ def decode(s: str) -> [str]: """Decodes a single string to a list of strings. """ """ Given an m x n integer matrix matrix, if an element is 0, set its entire row and column to 0's, and return the matrix. You must do it in place. Example 1: Input: matrix = [[1,1,1],[1,0,1],[1,1,1]] Output: [[1,0,1],[0,0,0],[1,0,1]] Example 2: Input: matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]] Output: [[0,0,0,0],[0,4,5,0],[0,3,1,0]] """ def setZeroes(self, matrix: List[List[int]]) -> None: """ Do not return anything, modify matrix in-place instead. """ """ Given an m x n matrix, return all elements of the matrix in spiral order. Example 1: Input: matrix = [[1,2,3],[4,5,6],[7,8,9]] Output: [1,2,3,6,9,8,7,4,5] Example 2: Input: matrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]] Output: [1,2,3,4,8,12,11,10,9,5,6,7] """ def spiralOrder(matrix: List[List[int]]) -> List[int]: pass """ Given an m x n grid of characters board and a string word, return true if word exists in the grid. The word can be constructed from letters of sequentially adjacent cells, where adjacent cells are horizontally or vertically neighboring. The same letter cell may not be used more than once. Example 1: Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCCED" Output: true Example 2: Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "SEE" Output: true Example 3: Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCB" Output: false """ def exist(board: List[List[str]], word: str) -> bool: pass """ You are climbing a staircase. It takes n steps to reach the top. Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top? Example 1: Input: n = 2 Output: 2 Explanation: There are two ways to climb to the top. 1. 1 step + 1 step 2. 2 steps Example 2: Input: n = 3 Output: 3 Explanation: There are three ways to climb to the top. 1. 1 step + 1 step + 1 step 2. 1 step + 2 steps 3. 2 steps + 1 step """ def climbStairs(self, n: int) -> int: pass """You are given an integer array coins representing coins of different denominations and an integer amount representing a total amount of money. Return the fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any combination of the coins, return -1. You may assume that you have an infinite number of each kind of coin. Example 1: Input: coins = [1,2,5], amount = 11 Output: 3 Explanation: 11 = 5 + 5 + 1 Example 2: Input: coins = [2], amount = 3 Output: -1 Example 3: Input: coins = [1], amount = 0 Output: 0 """ def coin_change(coins: List[int], amount: int) -> int: pass """ Given an integer array nums, return the length of the longest strictly increasing subsequence. A subsequence is a sequence that can be derived from an array by deleting some or no elements without changing the order of the remaining elements. For example, [3,6,2,7] is a subsequence of the array [0,3,1,6,2,2,7]. 
Example 1: Input: nums = [10,9,2,5,3,7,101,18] Output: 4 Explanation: The longest increasing subsequence is [2,3,7,101], therefore the length is 4. Example 2: Input: nums = [0,1,0,3,2,3] Output: 4 Example 3: Input: nums = [7,7,7,7,7,7,7] Output: 1 """ def lengthOfLIS(self, nums: List[int]) -> int: pass """ Given two strings text1 and text2, return the length of their longest common subsequence. If there is no common subsequence, return 0. A subsequence of a string is a new string generated from the original string with some characters (can be none) deleted without changing the relative order of the remaining characters. For example, "ace" is a subsequence of "abcde". A common subsequence of two strings is a subsequence that is common to both strings. Example 1: Input: text1 = "abcde", text2 = "ace" Output: 3 Explanation: The longest common subsequence is "ace" and its length is 3. Example 2: Input: text1 = "abc", text2 = "abc" Output: 3 Explanation: The longest common subsequence is "abc" and its length is 3. Example 3: Input: text1 = "abc", text2 = "def" Output: 0 Explanation: There is no such common subsequence, so the result is 0. """ def longestCommonSubsequence(self, text1: str, text2: str) -> int: pass """ Given a string s and a dictionary of strings wordDict, return true if s can be segmented into a space-separated sequence of one or more dictionary words. Note that the same word in the dictionary may be reused multiple times in the segmentation. Example 1: Input: s = "leetcode", wordDict = ["leet","code"] Output: true Explanation: Return true because "leetcode" can be segmented as "leet code". Example 2: Input: s = "applepenapple", wordDict = ["apple","pen"] Output: true Explanation: Return true because "applepenapple" can be segmented as "apple pen apple". Note that you are allowed to reuse a dictionary word. Example 3: Input: s = "catsandog", wordDict = ["cats","dog","sand","and","cat"] Output: false """ def wordBreak(self, s: str, wordDict: List[str]) -> bool: pass """ Given an array of distinct integers candidates and a target integer target, return a list of all unique combinations of candidates where the
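# ---------------------------------------------------------------------------
# Illustrative reference sketch (not one of the stubs above) for the
# wordBreak problem stated earlier: a standard dynamic-programming
# formulation where dp[i] is True when s[:i] can be segmented using
# wordDict. Assumes the module's existing `from typing import List` import.
# ---------------------------------------------------------------------------
def wordBreak_sketch(s: str, wordDict: List[str]) -> bool:
    words = set(wordDict)
    dp = [False] * (len(s) + 1)
    dp[0] = True  # the empty prefix is always segmentable
    for i in range(1, len(s) + 1):
        for j in range(i):
            # s[:i] is segmentable if some segmentable prefix s[:j]
            # is followed by a dictionary word s[j:i]
            if dp[j] and s[j:i] in words:
                dp[i] = True
                break
    return dp[len(s)]


# e.g. wordBreak_sketch("leetcode", ["leet", "code"]) -> True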
{ref['href'].split('/')[-1]: ref['attr'].ae_num for ref in pi_refs} # verify all AE-IDs allocated per prouter are unique self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) # verification at Physical Routers pr_ae_ids = get_zk_ae_ids() self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2) self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2) # Case #1 # Remove PI-1 from PR-1 through VMI-1 of VPG-1 update vmi_id = 1 vmi_name = 'vmi_vpg1_%s_%s' % (test_id, vmi_id) vpg_name = 'vpg_%s_%s' % (test_id, vmi_id) vmi_infos = [ {'name': vmi_name, 'vmi_uuid': vmi_objs[vmi_name].uuid, 'vpg': vpg_objs[vpg_name].uuid, 'fabric': fabric_name, 'pis': vpg1_pis[1:], 'vlan': vlan_ids[vmi_id - 1], 'is_untagged': False}] self._update_vmis(vmi_infos) # re-read VPGs for vpg, vpg_o in vpg_objs.items(): vpg_objs[vpg] = self.api.virtual_port_group_read( id=vpg_o.uuid) # Verifications at VPG-1 # check PI-1 is removed from VPG-1 pi_refs = vpg_objs[vpg_name].get_physical_interface_refs() vpg1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] self.assertEqual(len(pi_refs), 3) # verify AE-ID associated with VPG-1 # AE-IDs of remaining PIs are unaffected self.assertEqual(len(set(vpg1_ae_ids)), 1) # Verifications at VPG-2 pi_refs = vpg_objs[vpg_names[1]].get_physical_interface_refs() vpg2_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] self.assertEqual(len(pi_refs), 4) # verify AE-ID associated with VPG-2 # AE-IDs of remaining PIs are unaffected self.assertEqual(len(set(vpg2_ae_ids)), 1) # verification at Physical Routers # since only PI-1 was removed, AE-ID allocation remains same pr_ae_ids = get_zk_ae_ids() self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2) self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2) # Case #2 # Remove all PIs but PI-1 in PR-1/VPG-1 through VMI-1 of VPG-1 update vmi_id = 1 vmi_name = 'vmi_vpg1_%s_%s' % (test_id, vmi_id) vpg_name = 'vpg_%s_%s' % (test_id, vmi_id) vmi_infos = [ {'name': vmi_name, 'vmi_uuid': vmi_objs[vmi_name].uuid, 'vpg': vpg_objs[vpg_name].uuid, 'fabric': fabric_name, 'pis': vpg1_pis[0], 'vlan': vlan_ids[vmi_id - 1], 'is_untagged': False}] self._update_vmis(vmi_infos) # re-read VPGs for vpg, vpg_o in vpg_objs.items(): vpg_objs[vpg] = self.api.virtual_port_group_read( id=vpg_o.uuid) # Verifications at VPG-1 # check PI-1 is removed from VPG-1 pi_refs = vpg_objs[vpg_names[0]].get_physical_interface_refs() vpg1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] self.assertEqual(len(pi_refs), 1) # verify AE-ID associated with VPG-1 # AE-IDs of remaining PIs are unaffected self.assertEqual(len(set(vpg1_ae_ids)), 1) self.assertIsNone(vpg1_ae_ids[0]) # Verifications at VPG-2 pi_refs = vpg_objs[vpg_names[1]].get_physical_interface_refs() vpg2_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] self.assertEqual(len(pi_refs), 4) # verify AE-ID associated with VPG-2 # AE-IDs of remaining PIs are unaffected self.assertEqual(len(set(vpg2_ae_ids)), 1) # verify at ZK Physical Routers pr_ae_ids = get_zk_ae_ids() self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1) # Case 3 # Create a new VPG with two PIs, one from each PRs case3_id = 99 vpg3_uuid = vpg_objs[vpg_names[2]].uuid case3_vn_name = 'vn_case3_%s_%s' % (test_id, case3_id) case3_vn_objs = self._create_vns(proj_obj, [case3_vn_name]) vn_objs.update(case3_vn_objs) pi_per_pr = 1 case3_pr1_pi_name = '%s_case3_pr1_pi%d' % (test_id, case3_id) case3_pr2_pi_name = '%s_case3_pr2_pi%d' % (test_id, case3_id) 
case3_pr1_pi_objs = self._create_pi_objects( pr_objs[0], [case3_pr1_pi_name]) case3_pr2_pi_objs = self._create_pi_objects( pr_objs[1], [case3_pr2_pi_name]) vpg3_pis = [pi.get_fq_name() for pi in [case3_pr1_pi_objs[case3_pr1_pi_name], case3_pr2_pi_objs[case3_pr2_pi_name]]] pi_objs.update(case3_pr1_pi_objs) pi_objs.update(case3_pr2_pi_objs) vmi_info = { 'name': 'vmi_vpg3_%s_%s' % (test_id, 99), 'vmi_id': 99, 'parent_obj': proj_obj, 'vn': case3_vn_objs[case3_vn_name], 'vpg': vpg3_uuid, 'fabric': fabric_name, 'pis': vpg3_pis, 'vlan': case3_id, 'is_untagged': False} case3_vmi_obj = self._create_vmis([vmi_info]) vmi_objs.update(case3_vmi_obj) # re-read VPG-3 vpg_objs[vpg_names[2]] = self.api.virtual_port_group_read(id=vpg3_uuid) # Verifications at VPG-3 pi_refs = vpg_objs[vpg_names[2]].get_physical_interface_refs() vpg3_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] self.assertEqual(len(pi_refs), 2) # verify an AE-ID is allocated self.assertEqual(len(set(vpg3_ae_ids)), 1) # verify at ZK Physical Routers # Since a new VPG is added with PIs at Case-3 # only two AE-IDs should remain in each prouter pr_ae_ids = get_zk_ae_ids() self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2) self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2) # TO-DO # Verify AE-ID is re-allocated instead of new one # Case 4 # Add PI1/PR1, PI2/PR1 to VPG-1, so a new AE-ID is allocated vmi_id = 9 vmi_name = 'vmi_vpg1_%s_%s' % (test_id, vmi_id) vpg_name = 'vpg_%s_%s' % (test_id, 1) vmi_infos = [ {'name': vmi_name, 'vmi_uuid': vmi_objs[vmi_name].uuid, 'vpg': vpg_objs[vpg_name].uuid, 'fabric': fabric_name, 'pis': vpg1_pis[0:2], 'vlan': vlan_ids[vmi_id - 1], 'is_untagged': False}] self._update_vmis(vmi_infos) # re-read VPGs for vpg_name, vpg_obj in vpg_objs.items(): vpg_objs[vpg_name] = self.api.virtual_port_group_read( id=vpg_obj.uuid) # Verifications at VPG-1 # check PI1/PR1 and PI2/PR1 are added to VPG-1 pi_refs = vpg_objs[vpg_names[0]].get_physical_interface_refs() vpg1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] self.assertEqual(len(pi_refs), 2) # verify AE-ID associated with VPG-1 # A new AE-ID is allocated self.assertEqual(len(set(vpg1_ae_ids)), 1) # Verifications at VPG-2 pi_refs = vpg_objs[vpg_names[1]].get_physical_interface_refs() vpg2_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] self.assertEqual(len(pi_refs), 4) # verify AE-ID associated with VPG-2 # AE-IDs of remaining PIs are unaffected self.assertEqual(len(set(vpg2_ae_ids)), 1) # verify at ZK Physical Routers pr_ae_ids = get_zk_ae_ids() self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 3) self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2) # TO-DO # Verify AE-ID is re-allocated instead of new one # Case X1 # Create a new VPG with two PIs, both belonging to the same PR caseX1_id = 101 vpgX1_uuid = vpg_objs[vpg_names[3]].uuid caseX1_vn_names = ['vn_caseX1_%s_%s' % (test_id, caseX1_id)] caseX1_vn_objs = self._create_vns(proj_obj, caseX1_vn_names) vn_objs.update(caseX1_vn_objs) pi_per_pr = 2 caseX1_pr1_pi_names = ['%s_caseX1_pr1_pi%d' % (test_id, caseX1_id), '%s_caseX1_pr1_pi%d' % (test_id, caseX1_id + 1)] caseX1_pr1_pi_objs = self._create_pi_objects( pr_objs[0], caseX1_pr1_pi_names) vpgX1_pis = [pi.get_fq_name() for pi in [caseX1_pr1_pi_objs[caseX1_pr1_pi_names[0]], caseX1_pr1_pi_objs[caseX1_pr1_pi_names[1]]]] pi_objs.update(caseX1_pr1_pi_objs) vmi_info = { 'name': 'vmi_vpg4_%s_%s' % (test_id, caseX1_id), 'vmi_id': caseX1_id, 'parent_obj': proj_obj, 'vn': caseX1_vn_objs[caseX1_vn_names[0]], 'vpg': vpgX1_uuid, 'fabric': fabric_name, 'pis': 
vpgX1_pis, 'vlan': caseX1_id, 'is_untagged': False} caseX1_vmi_obj = self._create_vmis([vmi_info]) vmi_objs.update(caseX1_vmi_obj) # re-read VPG-3 vpg_objs[vpg_names[3]] = self.api.virtual_port_group_read( id=vpgX1_uuid) # Verifications at VPG-3 pi_refs = vpg_objs[vpg_names[3]].get_physical_interface_refs() vpgX1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] self.assertEqual(len(pi_refs), 2) # verify an AE-ID is allocated self.assertEqual(len(set(vpgX1_ae_ids)), 1) # verify at ZK Physical Routers # Since a new VPG is added with PIs at Case-3 # only two AE-IDs should remain in each prouter pr_ae_ids = get_zk_ae_ids() self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 4) self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2) # TO-DO # Verify AE-ID is re-allocated instead of new one # Case X2 # Create a new VPG with two PIs,both belonging to the same PR caseX2_id = 103 vpgX2_uuid = vpg_objs[vpg_names[4]].uuid caseX2_vn_names = ['vn_caseX2_%s_%s' % (test_id, caseX2_id)] caseX2_vn_objs = self._create_vns(proj_obj, caseX2_vn_names) vn_objs.update(caseX2_vn_objs) pi_per_pr = 2 caseX2_pr1_pi_names = ['%s_caseX2_pr1_pi%d' % (test_id, caseX2_id), '%s_caseX2_pr1_pi%d' % (test_id, caseX2_id + 1)] caseX2_pr1_pi_objs = self._create_pi_objects( pr_objs[0], caseX2_pr1_pi_names) vpgX2_pis = [pi.get_fq_name() for pi in [caseX2_pr1_pi_objs[caseX2_pr1_pi_names[0]], caseX2_pr1_pi_objs[caseX2_pr1_pi_names[1]]]] pi_objs.update(caseX2_pr1_pi_objs) vmi_info = { 'name': 'vmi_vpg5_%s_%s' % (test_id, caseX2_id), 'vmi_id': caseX2_id, 'parent_obj': proj_obj, 'vn': caseX2_vn_objs[caseX2_vn_names[0]], 'vpg': vpgX2_uuid, 'fabric': fabric_name, 'pis': vpgX2_pis, 'vlan': caseX2_id, 'is_untagged': False} caseX2_vmi_obj = self._create_vmis([vmi_info]) vmi_objs.update(caseX2_vmi_obj) # re-read VPG-3 vpg_objs[vpg_names[4]] = self.api.virtual_port_group_read( id=vpgX2_uuid) # Verifications at VPG-3 pi_refs = vpg_objs[vpg_names[4]].get_physical_interface_refs() self.assertEqual(len(pi_refs), 2) # verify an AE-ID is allocated self.assertEqual(len(set(vpgX1_ae_ids)), 1) # verify at ZK Physical Routers # Since a new VPG is added with PIs at Case-3 # only two AE-IDs should remain in each prouter pr_ae_ids = get_zk_ae_ids() self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 5) self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2) # TO-DO # Verify AE-ID is re-allocated instead of new one # Case X3 # Add PI2/PR1 to VPG-1, so a new AE-ID is allocated vmi_id = 10 vmi_name = 'vmi_vpg1_%s_%s' % (test_id, vmi_id) vpg_name = 'vpg_%s_%s' % (test_id, 1) vpg1_uuid = vpg_objs[vpg_names[0]].uuid vmi_infos = [ {'name': vmi_name, 'vmi_uuid': vmi_objs[vmi_name].uuid, 'vpg': vpg_objs[vpg_name].uuid, 'fabric': fabric_name, 'pis': vpg1_pis[1], 'vlan': vlan_ids[vmi_id - 1], 'is_untagged': False}] self._update_vmis(vmi_infos) # re-read VPG1 vpg_objs[vpg_names[0]] = self.api.virtual_port_group_read(id=vpg1_uuid) # Verifications at VPG-1 # check PI1/PR1 and PI2/PR1 are added to VPG-1 pi_refs = vpg_objs[vpg_names[0]].get_physical_interface_refs() vpg1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] self.assertEqual(len(pi_refs), 1) # verify AE-ID associated with VPG-1 # A new AE-ID is allocated self.assertEqual(len(set(vpg1_ae_ids)), 1) # Verifications at VPG-2 # pi_refs = vpg_objs[vpg_names[1]].get_physical_interface_refs() # vpg2_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] # self.assertEqual(len(pi_refs), 4) # verify AE-ID associated with VPG-2 # AE-IDs of remaining PIs are unaffected # self.assertEqual(len(set(vpg2_ae_ids)), 1) # 
verify at ZK Physical Routers pr_ae_ids = get_zk_ae_ids() self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 4) # self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2) # TO-DO # Verify AE-ID is re-allocated instead of new one # Case X4 # Create a new VPG with two PIs, both belonging to the same PR caseX4_id = 66 vpgX4_uuid = vpg_objs[vpg_names[5]].uuid caseX4_vn_names = ['vn_caseX4_%s_%s' % (test_id, caseX4_id)] caseX4_vn_objs = self._create_vns(proj_obj, caseX4_vn_names) vn_objs.update(caseX4_vn_objs) pi_per_pr = 2 caseX4_pr1_pi_names = ['%s_caseX4_pr1_pi%d' % (test_id, caseX4_id), '%s_caseX4_pr1_pi%d' % (test_id, caseX4_id + 1)] caseX4_pr1_pi_objs = self._create_pi_objects( pr_objs[0], caseX4_pr1_pi_names) vpgX4_pis = [pi.get_fq_name() for pi in [caseX4_pr1_pi_objs[caseX4_pr1_pi_names[0]], caseX4_pr1_pi_objs[caseX4_pr1_pi_names[1]]]] pi_objs.update(caseX4_pr1_pi_objs) vmi_info = { 'name': 'vmi_vpg6_%s_%s' % (test_id, caseX4_id), 'vmi_id': caseX4_id, 'parent_obj': proj_obj, 'vn': caseX4_vn_objs[caseX4_vn_names[0]], 'vpg': vpgX4_uuid, 'fabric': fabric_name, 'pis': vpgX4_pis, 'vlan': caseX4_id, 'is_untagged': False} caseX4_vmi_obj = self._create_vmis([vmi_info]) vmi_objs.update(caseX4_vmi_obj) # re-read VPG-5 vpg_objs[vpg_names[5]] = self.api.virtual_port_group_read( id=vpgX4_uuid) # Verifications at VPG-5 pi_refs = vpg_objs[vpg_names[5]].get_physical_interface_refs() vpgX4_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] self.assertEqual(len(pi_refs), 2) # verify an AE-ID is allocated self.assertEqual(len(set(vpgX4_ae_ids)), 1) # verify at ZK Physical Routers # Since a new VPG is added with PIs at Case-3 # only two AE-IDs should remain in
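# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test class): the assertion pattern
# repeated in the cases above reduces to this check -- every physical
# interface ref of a VPG must carry the same ae_num. The plain dicts below
# are a simplified stand-in for get_physical_interface_refs() output.
# ---------------------------------------------------------------------------
def _single_ae_id(pi_refs):
    # mirrors: self.assertEqual(len(set(vpg_ae_ids)), 1)
    return len({ref['ae_num'] for ref in pi_refs}) == 1


assert _single_ae_id([{'ae_num': 0, 'prouter': 'pr-1'},
                      {'ae_num': 0, 'prouter': 'pr-2'}])
assert not _single_ae_id([{'ae_num': 0, 'prouter': 'pr-1'},
                          {'ae_num': 1, 'prouter': 'pr-1'}])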
memory banks downgraded
ADL_XFIREX_STATE_DOWNGRADEMEMBANKS = Constant(1 << 22).set_string('CrossfireX cannot be enabled unless memory banks downgraded')
# Notification that memory banks are currently downgraded
ADL_XFIREX_STATE_MEMBANKSDOWNGRADED = Constant(1 << 23).set_string('Notification that memory banks are currently downgraded')
# Extended desktop or clone mode is allowed.
ADL_XFIREX_STATE_DUALDISPLAYSALLOWED = Constant(1 << 24).set_string('Extended desktop or clone mode is allowed.')
# P2P mapping was through peer aperture
ADL_XFIREX_STATE_P2P_APERTURE_MAPPING = Constant(1 << 25).set_string('P2P mapping was through peer aperture')
# For back compatible
ADL_XFIREX_STATE_P2PFLUSH_REQUIRED = ADL_XFIREX_STATE_P2P_APERTURE_MAPPING
# There is CrossfireX side port connection between GPUs
ADL_XFIREX_STATE_XSP_CONNECTED = Constant(1 << 26).set_string('There is CrossfireX side port connection between GPUs')
# System needs a reboot before enabling CrossfireX
ADL_XFIREX_STATE_ENABLE_CF_REBOOT_REQUIRED = Constant(1 << 27).set_string('System needs a reboot before enabling CrossfireX')
# System needs a reboot after disabling CrossfireX
ADL_XFIREX_STATE_DISABLE_CF_REBOOT_REQUIRED = Constant(1 << 28).set_string('System needs a reboot after disabling CrossfireX')
# Indicates the base driver handles the downgrade key updating
ADL_XFIREX_STATE_DRV_HANDLE_DOWNGRADE_KEY = Constant(1 << 29).set_string('Indicates the base driver handles the downgrade key updating')
# CrossfireX needs to be reconfigured by CCC because of a broken LDA chain
ADL_XFIREX_STATE_CF_RECONFIG_REQUIRED = Constant(1 << 30).set_string('CrossfireX needs to be reconfigured by CCC because of a broken LDA chain')
# Could not obtain current status
ADL_XFIREX_STATE_ERRORGETTINGSTATUS = Constant(1 << 31).set_string('Could not obtain current status')
# @}
# /////////////////////////////////////////////////////////////////////////
# ADL_DISPLAY_ADJUSTMENT_PIXELFORMAT adjustment values
# (bit-vector)
# /////////////////////////////////////////////////////////////////////////
# / \defgroup define_pixel_formats Pixel Formats values
# / This group defines the various Pixel Formats that a particular digital
# display can support.

\n # / Since a display can support multiple formats, these values can be # bit-or'ed to indicate the various formats \n # @{ ADL_DISPLAY_PIXELFORMAT_UNKNOWN = Constant(0).set_string('Unknown') ADL_DISPLAY_PIXELFORMAT_RGB = Constant(1 << 0).set_string('RGB Full Range') # Limited range ADL_DISPLAY_PIXELFORMAT_YCRCB444 = Constant(1 << 0).set_string('YCRCB444') # Limited range ADL_DISPLAY_PIXELFORMAT_YCRCB422 = Constant(1 << 0).set_string('YCRCB422') ADL_DISPLAY_PIXELFORMAT_RGB_LIMITED_RANGE = Constant(1 << 0).set_string('RGB Limited Range') # Full range ADL_DISPLAY_PIXELFORMAT_RGB_FULL_RANGE = ADL_DISPLAY_PIXELFORMAT_RGB ADL_DISPLAY_PIXELFORMAT_YCRCB420 = Constant(1 << 0).set_string('YCRCB420') # @} # / \defgroup define_contype Connector Type Values # / ADLDisplayConfig.ulConnectorType defines # @{ ADL_DL_DISPLAYCONFIG_CONTYPE_UNKNOWN = 0 ADL_DL_DISPLAYCONFIG_CONTYPE_CV_NONI2C_JP = 1 ADL_DL_DISPLAYCONFIG_CONTYPE_CV_JPN = 2 ADL_DL_DISPLAYCONFIG_CONTYPE_CV_NA = 3 ADL_DL_DISPLAYCONFIG_CONTYPE_CV_NONI2C_NA = 4 ADL_DL_DISPLAYCONFIG_CONTYPE_VGA = 5 ADL_DL_DISPLAYCONFIG_CONTYPE_DVI_D = 6 ADL_DL_DISPLAYCONFIG_CONTYPE_DVI_I = 7 ADL_DL_DISPLAYCONFIG_CONTYPE_HDMI_TYPE_A = 8 ADL_DL_DISPLAYCONFIG_CONTYPE_HDMI_TYPE_B = 9 ADL_DL_DISPLAYCONFIG_CONTYPE_DISPLAYPORT = 10 # @} # ///////////////////////////////////////////////////////////////////////// # ADL_DISPLAY_DISPLAYINFO_ Definitions # for ADLDisplayInfo.iDisplayInfoMask and ADLDisplayInfo.iDisplayInfoValue # (bit-vector) # ///////////////////////////////////////////////////////////////////////// # / \defgroup define_displayinfomask Display Info Mask Values # @{ ADL_DISPLAY_DISPLAYINFO_DISPLAYCONNECTED = Constant(0x00000001).set_string('DISPLAYCONNECTED') ADL_DISPLAY_DISPLAYINFO_DISPLAYMAPPED = Constant(0x00000002).set_string('DISPLAYMAPPED') ADL_DISPLAY_DISPLAYINFO_NONLOCAL = Constant(0x00000004).set_string('NONLOCAL') ADL_DISPLAY_DISPLAYINFO_FORCIBLESUPPORTED = Constant(0x00000008).set_string('FORCIBLESUPPORTED') ADL_DISPLAY_DISPLAYINFO_GENLOCKSUPPORTED = Constant(0x00000010).set_string('GENLOCKSUPPORTED') ADL_DISPLAY_DISPLAYINFO_MULTIVPU_SUPPORTED = Constant(0x00000020).set_string('MULTIVPU_SUPPORTED') ADL_DISPLAY_DISPLAYINFO_LDA_DISPLAY = Constant(0x00000040).set_string('LDA_DISPLAY') ADL_DISPLAY_DISPLAYINFO_MODETIMING_OVERRIDESSUPPORTED = Constant(0x00000080).set_string('MODETIMING_OVERRIDESSUPPORTED') ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_SINGLE = Constant(0x00000100).set_string('MANNER_SUPPORTED_SINGLE') ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_CLONE = Constant(0x00000200).set_string('MANNER_SUPPORTED_CLONE') # / Legacy support for XP ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_2VSTRETCH = Constant(0x00000400).set_string('MANNER_SUPPORTED_2VSTRETCH') ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_2HSTRETCH = Constant(0x00000800).set_string('MANNER_SUPPORTED_2HSTRETCH') ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_EXTENDED = Constant(0x00001000).set_string('MANNER_SUPPORTED_EXTENDED') # / More support manners ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_NSTRETCH1GPU = Constant(0x00010000).set_string('MANNER_SUPPORTED_NSTRETCH1GPU') ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_NSTRETCHNGPU = Constant(0x00020000).set_string('MANNER_SUPPORTED_NSTRETCHNGPU') ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_RESERVED2 = Constant(0x00040000).set_string('MANNER_SUPPORTED_RESERVED2') ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_RESERVED3 = Constant(0x00080000).set_string('MANNER_SUPPORTED_RESERVED3') # / Projector display type ADL_DISPLAY_DISPLAYINFO_SHOWTYPE_PROJECTOR = 
Constant(0x00100000).set_string('SHOWTYPE_PROJECTOR') # @} # ///////////////////////////////////////////////////////////////////////// # ADL_ADAPTER_DISPLAY_MANNER_SUPPORTED_ Definitions # for ADLAdapterDisplayCap of ADL_Adapter_Display_Cap() # (bit-vector) # ///////////////////////////////////////////////////////////////////////// # / \defgroup define_adaptermanner Adapter Manner Support Values # @{ ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_NOTACTIVE = 0x00000001 ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_SINGLE = 0x00000002 ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_CLONE = 0x00000004 ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_NSTRETCH1GPU = 0x00000008 ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_NSTRETCHNGPU = 0x00000010 # / Legacy support for XP ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_2VSTRETCH = 0x00000020 ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_2HSTRETCH = 0x00000040 ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_EXTENDED = 0x00000080 ADL_ADAPTER_DISPLAYCAP_PREFERDISPLAY_SUPPORTED = 0x00000100 ADL_ADAPTER_DISPLAYCAP_BEZEL_SUPPORTED = 0x00000200 # ///////////////////////////////////////////////////////////////////////// # ADL_DISPLAY_DISPLAYMAP_MANNER_ Definitions # for ADLDisplayMap.iDisplayMapMask and ADLDisplayMap.iDisplayMapValue # (bit-vector) # ///////////////////////////////////////////////////////////////////////// ADL_DISPLAY_DISPLAYMAP_MANNER_RESERVED = 0x00000001 ADL_DISPLAY_DISPLAYMAP_MANNER_NOTACTIVE = 0x00000002 ADL_DISPLAY_DISPLAYMAP_MANNER_SINGLE = 0x00000004 ADL_DISPLAY_DISPLAYMAP_MANNER_CLONE = 0x00000008 # Removed NSTRETCH ADL_DISPLAY_DISPLAYMAP_MANNER_RESERVED1 = 0x00000010 ADL_DISPLAY_DISPLAYMAP_MANNER_HSTRETCH = 0x00000020 ADL_DISPLAY_DISPLAYMAP_MANNER_VSTRETCH = 0x00000040 ADL_DISPLAY_DISPLAYMAP_MANNER_VLD = 0x00000080 # @} # ///////////////////////////////////////////////////////////////////////// # ADL_DISPLAY_DISPLAYMAP_OPTION_ Definitions # for iOption in function ADL_Display_DisplayMapConfig_Get # (bit-vector) # ///////////////////////////////////////////////////////////////////////// ADL_DISPLAY_DISPLAYMAP_OPTION_GPUINFO = 0x00000001 # ///////////////////////////////////////////////////////////////////////// # ADL_DISPLAY_DISPLAYTARGET_ Definitions # for ADLDisplayTarget.iDisplayTargetMask and # ADLDisplayTarget.iDisplayTargetValue # (bit-vector) # ///////////////////////////////////////////////////////////////////////// ADL_DISPLAY_DISPLAYTARGET_PREFERRED = 0x00000001 # ///////////////////////////////////////////////////////////////////////// # ADL_DISPLAY_POSSIBLEMAPRESULT_VALID Definitions # for ADLPossibleMapResult.iPossibleMapResultMask and # ADLPossibleMapResult.iPossibleMapResultValue # (bit-vector) # ///////////////////////////////////////////////////////////////////////// ADL_DISPLAY_POSSIBLEMAPRESULT_VALID = 0x00000001 ADL_DISPLAY_POSSIBLEMAPRESULT_BEZELSUPPORTED = 0x00000002 ADL_DISPLAY_POSSIBLEMAPRESULT_OVERLAPSUPPORTED = 0x00000004 # ///////////////////////////////////////////////////////////////////////// # ADL_DISPLAY_MODE_ Definitions # for ADLMode.iModeMask, ADLMode.iModeValue, and ADLMode.iModeFlag # (bit-vector) # ///////////////////////////////////////////////////////////////////////// # / \defgroup define_displaymode Display Mode Values # @{ ADL_DISPLAY_MODE_COLOURFORMAT_565 = 0x00000001 ADL_DISPLAY_MODE_COLOURFORMAT_8888 = 0x00000002 ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_000 = 0x00000004 ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_090 = 0x00000008 ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_180 = 0x00000010 ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_270 = 0x00000020 
ADL_DISPLAY_MODE_REFRESHRATE_ROUNDED = 0x00000040
ADL_DISPLAY_MODE_REFRESHRATE_ONLY = 0x00000080

ADL_DISPLAY_MODE_PROGRESSIVE_FLAG = 0
ADL_DISPLAY_MODE_INTERLACED_FLAG = 2
# @}

# /////////////////////////////////////////////////////////////////////////
# ADL_OSMODEINFO Definitions
# /////////////////////////////////////////////////////////////////////////
# / \defgroup define_osmode OS Mode Values
# @{
ADL_OSMODEINFOXPOS_DEFAULT = -640
ADL_OSMODEINFOYPOS_DEFAULT = 0
ADL_OSMODEINFOXRES_DEFAULT = 640
ADL_OSMODEINFOYRES_DEFAULT = 480
ADL_OSMODEINFOXRES_DEFAULT800 = 800
ADL_OSMODEINFOYRES_DEFAULT600 = 600
ADL_OSMODEINFOREFRESHRATE_DEFAULT = 60
ADL_OSMODEINFOCOLOURDEPTH_DEFAULT = 8
ADL_OSMODEINFOCOLOURDEPTH_DEFAULT16 = 16
ADL_OSMODEINFOCOLOURDEPTH_DEFAULT24 = 24
ADL_OSMODEINFOCOLOURDEPTH_DEFAULT32 = 32
ADL_OSMODEINFOORIENTATION_DEFAULT = 0

if defined(_WIN32) or defined(_WIN64):
    class DUMMY_ENUM(ENUM):
        DISPLAYCONFIG_ROTATION_IDENTITY = EnumItem(0).set_string('Identity')
        DISPLAYCONFIG_ROTATION_ROTATE90 = EnumItem(1).set_string('90°')
        DISPLAYCONFIG_ROTATION_ROTATE180 = EnumItem(2).set_string('180°')
        DISPLAYCONFIG_ROTATION_ROTATE270 = EnumItem(3).set_string('270°')
        DISPLAYCONFIG_ROTATION_FORCE_UINT32 = EnumItem(4).set_string('Force UINT32')

    ADL_OSMODEINFOORIENTATION_DEFAULT_WIN7 = (
        DUMMY_ENUM.DISPLAYCONFIG_ROTATION_FORCE_UINT32
    )

ADL_OSMODEFLAG_DEFAULT = 0
# @}

# /////////////////////////////////////////////////////////////////////////
# ADLThreadingModel Enumeration
# /////////////////////////////////////////////////////////////////////////
# / \defgroup thread_model
# / Used with \ref ADL_Main_ControlX2_Create and \ref
# ADL2_Main_ControlX2_Create to specify how ADL handles API calls when
# executed by multiple threads concurrently.
# / \brief Declares ADL threading behavior.
# @{
class ADLThreadingModel(ENUM):
    # not < Default behavior. ADL will not enforce serialization of ADL
    # API executions by multiple threads. Multiple threads will be allowed
    # to enter ADL at the same time. Note that the ADL library is not
    # guaranteed to be thread-safe. A client that calls
    # ADL_Main_Control_Create has to provide its own mechanism for ADL
    # call serialization.
    ADL_THREADING_UNLOCKED = EnumItem(0).set_string('Unlocked')
    # not < ADL will enforce serialization of ADL API when called by
    # multiple threads. Only a single thread will be allowed to enter the
    # ADL API at a time. This option makes ADL calls thread-safe. You
    # shouldn't use this option if ADL calls will be executed on the Linux
    # X-server rendering thread. It can cause the application to hang.
    ADL_THREADING_LOCKED = EnumItem(1).set_string('Locked')

ADL_THREADING_UNLOCKED = ADLThreadingModel.ADL_THREADING_UNLOCKED
ADL_THREADING_LOCKED = ADLThreadingModel.ADL_THREADING_LOCKED
# @}

# /////////////////////////////////////////////////////////////////////////
# ADLPurposeCode Enumeration
# /////////////////////////////////////////////////////////////////////////
class ADLPurposeCode(ENUM):
    ADL_PURPOSECODE_NORMAL = EnumItem(0).set_string('Normal')
    ADL_PURPOSECODE_HIDE_MODE_SWITCH = EnumItem(1).set_string('Hide Mode Switch.')
    ADL_PURPOSECODE_MODE_SWITCH = EnumItem(2).set_string('Mode switch.')
    ADL_PURPOSECODE_ATTATCH_DEVICE = EnumItem(3).set_string('Attach Device.')
    ADL_PURPOSECODE_DETACH_DEVICE = EnumItem(4).set_string('Detach device.')
    ADL_PURPOSECODE_SETPRIMARY_DEVICE = EnumItem(5).set_string('Set primary device.')
    ADL_PURPOSECODE_GDI_ROTATION = EnumItem(6).set_string('GDI Rotation.')
    ADL_PURPOSECODE_ATI_ROTATION = EnumItem(7).set_string('ATI Rotation.')

ADL_PURPOSECODE_NORMAL = ADLPurposeCode.ADL_PURPOSECODE_NORMAL
ADL_PURPOSECODE_HIDE_MODE_SWITCH = ADLPurposeCode.ADL_PURPOSECODE_HIDE_MODE_SWITCH
ADL_PURPOSECODE_MODE_SWITCH = ADLPurposeCode.ADL_PURPOSECODE_MODE_SWITCH
ADL_PURPOSECODE_ATTATCH_DEVICE = ADLPurposeCode.ADL_PURPOSECODE_ATTATCH_DEVICE
ADL_PURPOSECODE_DETACH_DEVICE = ADLPurposeCode.ADL_PURPOSECODE_DETACH_DEVICE
ADL_PURPOSECODE_SETPRIMARY_DEVICE = ADLPurposeCode.ADL_PURPOSECODE_SETPRIMARY_DEVICE
ADL_PURPOSECODE_GDI_ROTATION = ADLPurposeCode.ADL_PURPOSECODE_GDI_ROTATION
ADL_PURPOSECODE_ATI_ROTATION = ADLPurposeCode.ADL_PURPOSECODE_ATI_ROTATION

# /////////////////////////////////////////////////////////////////////////
# ADLAngle Enumeration
# /////////////////////////////////////////////////////////////////////////
class ADLAngle(ENUM):
    ADL_ANGLE_LANDSCAPE = EnumItem(0).set_string('Landscape')
    ADL_ANGLE_ROTATERIGHT = EnumItem(90).set_string('Rotate right')
    ADL_ANGLE_ROTATE180 = EnumItem(180).set_string('Rotate 180')
    ADL_ANGLE_ROTATELEFT = EnumItem(270).set_string('Rotate left')

ADL_ANGLE_LANDSCAPE = ADLAngle.ADL_ANGLE_LANDSCAPE
ADL_ANGLE_ROTATERIGHT = ADLAngle.ADL_ANGLE_ROTATERIGHT
ADL_ANGLE_ROTATE180 = ADLAngle.ADL_ANGLE_ROTATE180
ADL_ANGLE_ROTATELEFT = ADLAngle.ADL_ANGLE_ROTATELEFT

# /////////////////////////////////////////////////////////////////////////
# ADLOrientationDataType Enumeration
# /////////////////////////////////////////////////////////////////////////
class ADLOrientationDataType(ENUM):
    ADL_ORIENTATIONTYPE_OSDATATYPE = EnumItem(1).set_string('OS Data Type')
    ADL_ORIENTATIONTYPE_NONOSDATATYPE = EnumItem(2).set_string('Non OS data type')

ADL_ORIENTATIONTYPE_OSDATATYPE = ADLOrientationDataType.ADL_ORIENTATIONTYPE_OSDATATYPE
ADL_ORIENTATIONTYPE_NONOSDATATYPE = ADLOrientationDataType.ADL_ORIENTATIONTYPE_NONOSDATATYPE

# /////////////////////////////////////////////////////////////////////////
# ADLPanningMode Enumeration
# /////////////////////////////////////////////////////////////////////////
class ADLPanningMode(ENUM):
    ADL_PANNINGMODE_NO_PANNING = EnumItem(0).set_string('No panning')
    ADL_PANNINGMODE_AT_LEAST_ONE_NO_PANNING = EnumItem(1).set_string('At least one no panning')
    ADL_PANNINGMODE_ALLOW_PANNING = EnumItem(2).set_string('Allow panning')

ADL_PANNINGMODE_NO_PANNING = ADLPanningMode.ADL_PANNINGMODE_NO_PANNING
ADL_PANNINGMODE_AT_LEAST_ONE_NO_PANNING = ADLPanningMode.ADL_PANNINGMODE_AT_LEAST_ONE_NO_PANNING
ADL_PANNINGMODE_ALLOW_PANNING = ADLPanningMode.ADL_PANNINGMODE_ALLOW_PANNING
#
///////////////////////////////////////////////////////////////////////// # ADLLARGEDESKTOPTYPE Enumeration # ///////////////////////////////////////////////////////////////////////// class ADLLARGEDESKTOPTYPE(ENUM): ADL_LARGEDESKTOPTYPE_NORMALDESKTOP = EnumItem(0).set_string('Normal Desktop') ADL_LARGEDESKTOPTYPE_PSEUDOLARGEDESKTOP = EnumItem(1).set_string('Pseudo Large Desktop') ADL_LARGEDESKTOPTYPE_VERYLARGEDESKTOP = EnumItem(2).set_string('Very large desktop') ADL_LARGEDESKTOPTYPE_NORMALDESKTOP = ADLLARGEDESKTOPTYPE.ADL_LARGEDESKTOPTYPE_NORMALDESKTOP ADL_LARGEDESKTOPTYPE_PSEUDOLARGEDESKTOP = ADLLARGEDESKTOPTYPE.ADL_LARGEDESKTOPTYPE_PSEUDOLARGEDESKTOP ADL_LARGEDESKTOPTYPE_VERYLARGEDESKTOP = ADLLARGEDESKTOPTYPE.ADL_LARGEDESKTOPTYPE_VERYLARGEDESKTOP # ///////////////////////////////////////////////////////////////////////// # ADLPlatform Enumeration # ///////////////////////////////////////////////////////////////////////// class ADLPlatForm(ENUM): GRAPHICS_PLATFORM_DESKTOP = EnumItem(0).set_string('Desktop') GRAPHICS_PLATFORM_MOBILE = EnumItem(1).set_string('Mobile') GRAPHICS_PLATFORM_DESKTOP = ADLPlatForm.GRAPHICS_PLATFORM_DESKTOP GRAPHICS_PLATFORM_MOBILE = ADLPlatForm.GRAPHICS_PLATFORM_MOBILE # ///////////////////////////////////////////////////////////////////////// # ADLGraphicCoreGeneration Enumeration # ///////////////////////////////////////////////////////////////////////// class ADLGraphicCoreGeneration(ENUM): ADL_GRAPHIC_CORE_GENERATION_UNDEFINED = EnumItem(0).set_string('Undefined') ADL_GRAPHIC_CORE_GENERATION_PRE_GCN = EnumItem(1).set_string('Pre GCN') ADL_GRAPHIC_CORE_GENERATION_GCN = EnumItem(1).set_string('GCN') ADL_GRAPHIC_CORE_GENERATION_UNDEFINED = ADLGraphicCoreGeneration.ADL_GRAPHIC_CORE_GENERATION_UNDEFINED ADL_GRAPHIC_CORE_GENERATION_PRE_GCN = ADLGraphicCoreGeneration.ADL_GRAPHIC_CORE_GENERATION_PRE_GCN ADL_GRAPHIC_CORE_GENERATION_GCN = ADLGraphicCoreGeneration.ADL_GRAPHIC_CORE_GENERATION_GCN # Other Definitions for internal use # Values for ADL_Display_WriteAndReadI2CRev_Get() ADL_I2C_MAJOR_API_REV = 0x00000001 ADL_I2C_MINOR_DEFAULT_API_REV = 0x00000000 ADL_I2C_MINOR_OEM_API_REV = 0x00000001 # Values for ADL_Display_WriteAndReadI2C() ADL_DL_I2C_LINE_OEM = 0x00000001 ADL_DL_I2C_LINE_OD_CONTROL = 0x00000002 ADL_DL_I2C_LINE_OEM2 = 0x00000003 ADL_DL_I2C_LINE_OEM3 = 0x00000004 ADL_DL_I2C_LINE_OEM4 = 0x00000005 ADL_DL_I2C_LINE_OEM5 = 0x00000006 ADL_DL_I2C_LINE_OEM6 = 0x00000007 # Max size of I2C data buffer ADL_DL_I2C_MAXDATASIZE = 0x00000040 ADL_DL_I2C_MAXWRITEDATASIZE = 0x0000000C ADL_DL_I2C_MAXADDRESSLENGTH = 0x00000006 ADL_DL_I2C_MAXOFFSETLENGTH = 0x00000004 # / Values for ADLDisplayProperty.iPropertyType ADL_DL_DISPLAYPROPERTY_TYPE_UNKNOWN = 0 ADL_DL_DISPLAYPROPERTY_TYPE_EXPANSIONMODE = 1 ADL_DL_DISPLAYPROPERTY_TYPE_USEUNDERSCANSCALING = 2 # / Enables ITC processing for HDMI panels that are capable of the feature ADL_DL_DISPLAYPROPERTY_TYPE_ITCFLAGENABLE = 9 ADL_DL_DISPLAYPROPERTY_TYPE_DOWNSCALE = 11 # / Values for ADLDisplayContent.iContentType # / Certain HDMI panels that support ITC have support for a feature such # that, the display on the panel # / can be adjusted to optimize the view of the content being displayed, # depending on the type of content. 
ADL_DL_DISPLAYCONTENT_TYPE_GRAPHICS = Constant(1).set_string('Graphics') ADL_DL_DISPLAYCONTENT_TYPE_PHOTO = Constant(2).set_string('Photo') ADL_DL_DISPLAYCONTENT_TYPE_CINEMA = Constant(4).set_string('Cinema') ADL_DL_DISPLAYCONTENT_TYPE_GAME = Constant(8).set_string('Game') # values for ADLDisplayProperty.iExpansionMode ADL_DL_DISPLAYPROPERTY_EXPANSIONMODE_CENTER = Constant(0).set_string('Center') ADL_DL_DISPLAYPROPERTY_EXPANSIONMODE_FULLSCREEN = Constant(1).set_string('Full Screen') ADL_DL_DISPLAYPROPERTY_EXPANSIONMODE_ASPECTRATIO = Constant(2).set_string('Aspect Ratio') # /\defgroup define_dither_states Dithering options # @{ # / Dithering disabled. ADL_DL_DISPLAY_DITHER_DISABLED = Constant(0).set_string('Disabled') # / Use default driver settings for dithering. Note that the default # setting could be dithering disabled. ADL_DL_DISPLAY_DITHER_DRIVER_DEFAULT = Constant(1).set_string('Default') # / Temporal dithering to 6 bpc. Note that if the input is 12 bits, the # two least significant bits will be truncated. ADL_DL_DISPLAY_DITHER_FM6 = Constant(2).set_string('Temporal dithering to 6 bpc') # / Temporal dithering to 8 bpc. ADL_DL_DISPLAY_DITHER_FM8 = Constant(3).set_string('Temporal dithering to 8 bpc') # / Temporal dithering to 10 bpc. ADL_DL_DISPLAY_DITHER_FM10 = Constant(4).set_string('Temporal dithering to 10 bpc') # / Spatial dithering to 6 bpc. Note that if the input is 12 bits, the two # least significant bits will be truncated. ADL_DL_DISPLAY_DITHER_DITH6 = Constant(5).set_string('Spatial dithering to 6 bpc') # / Spatial dithering to 8 bpc. ADL_DL_DISPLAY_DITHER_DITH8 = Constant(6).set_string('Spatial dithering to 8 bpc') # / Spatial dithering to 10 bpc. ADL_DL_DISPLAY_DITHER_DITH10 = Constant(7).set_string('Spatial dithering to 10 bpc') # / Spatial dithering to 6 bpc. Random number generators are reset every # frame, so the same input value of a certain pixel will always be # dithered to the same output value. Note that if the input is 12 bits, # the two least significant bits will be truncated. ADL_DL_DISPLAY_DITHER_DITH6_NO_FRAME_RAND = Constant(8).set_string('Spatial dithering to 6 bpc random') # / Spatial dithering to 8 bpc. Random number generators are reset every # frame, so the same input value of a certain pixel will always be # dithered
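# ---------------------------------------------------------------------------
# Illustrative helper (not part of the original module): the bit-vector
# groups above are typically decoded by masking each flag in turn. The flag
# map below is a small hand-picked subset of the ADL_DISPLAY_DISPLAYINFO_
# values defined earlier, used only as an example.
# ---------------------------------------------------------------------------
_EXAMPLE_DISPLAYINFO_FLAGS = {
    0x00000001: 'DISPLAYCONNECTED',
    0x00000002: 'DISPLAYMAPPED',
    0x00000008: 'FORCIBLESUPPORTED',
}


def decode_displayinfo_mask(mask):
    # return the names of all flags set in the bit-vector
    return [name for bit, name in _EXAMPLE_DISPLAYINFO_FLAGS.items() if mask & bit]


# e.g. decode_displayinfo_mask(0x3) -> ['DISPLAYCONNECTED', 'DISPLAYMAPPED']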
``x.shape + (order,)``, where :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will be the same as the converted `x`, `y`, and `z`. See Also -------- polyvander, polyvander3d. polyval2d, polyval3d Notes ----- .. versionadded:: 1.7.0 """ ideg = [int(d) for d in deg] is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] if is_valid != [1, 1, 1]: raise ValueError("degrees must be non-negative integers") degx, degy, degz = ideg x, y, z = np.array((x, y, z), copy=0) + 0.0 vx = polyvander(x, degx) vy = polyvander(y, degy) vz = polyvander(z, degz) v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] # einsum bug #v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz) return v.reshape(v.shape[:-3] + (-1,)) def polyfit(x, y, deg, rcond=None, full=False, w=None): """ Least-squares fit of a polynomial to data. Return the coefficients of a polynomial of degree `deg` that is the least squares fit to the data values `y` given at points `x`. If `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple fits are done, one for each column of `y`, and the resulting coefficients are stored in the corresponding columns of a 2-D return. The fitted polynomial(s) are in the form .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n, where `n` is `deg`. Parameters ---------- x : array_like, shape (`M`,) x-coordinates of the `M` sample (data) points ``(x[i], y[i])``. y : array_like, shape (`M`,) or (`M`, `K`) y-coordinates of the sample points. Several sets of sample points sharing the same x-coordinates can be (independently) fit with one call to `polyfit` by passing in for `y` a 2-D array that contains one data set per column. deg : int or array_like Degree of the fitting polynomial. If `deg` is a single integer all terms up to and including the `deg`'th term are included. `deg` may alternatively be a list or array specifying which terms in the Legendre expansion to include in the fit. .. versionchanged:: 1.11.0 `deg` may be a list specifying which terms to fit rcond : float, optional Relative condition number of the fit. Singular values smaller than `rcond`, relative to the largest singular value, will be ignored. The default value is ``len(x)*eps``, where `eps` is the relative precision of the platform's float type, about 2e-16 in most cases. full : bool, optional Switch determining the nature of the return value. When ``False`` (the default) just the coefficients are returned; when ``True``, diagnostic information from the singular value decomposition (used to solve the fit's matrix equation) is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. .. versionadded:: 1.5.0 Returns ------- coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) Polynomial coefficients ordered from low to high. If `y` was 2-D, the coefficients in column `k` of `coef` represent the polynomial fit to the data in `y`'s `k`-th column. [residuals, rank, singular_values, rcond] : list These values are only returned if `full` = True resid -- sum of squared residuals of the least squares fit rank -- the numerical rank of the scaled Vandermonde matrix sv -- singular values of the scaled Vandermonde matrix rcond -- value of `rcond`. For more details, see `linalg.lstsq`. Raises ------ RankWarning Raised if the matrix in the least-squares fit is rank deficient. 
The warning is only raised if `full` == False. The warnings can be turned off by: >>> import warnings >>> warnings.simplefilter('ignore', RankWarning) See Also -------- chebfit, legfit, lagfit, hermfit, hermefit polyval : Evaluates a polynomial. polyvander : Vandermonde matrix for powers. linalg.lstsq : Computes a least-squares fit from the matrix. scipy.interpolate.UnivariateSpline : Computes spline fits. Notes ----- The solution is the coefficients of the polynomial `p` that minimizes the sum of the weighted squared errors .. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, where the :math:`w_j` are the weights. This problem is solved by setting up the (typically) over-determined matrix equation: .. math :: V(x) * c = w * y, where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the coefficients to be solved for, `w` are the weights, and `y` are the observed values. This equation is then solved using the singular value decomposition of `V`. If some of the singular values of `V` are so small that they are neglected (and `full` == ``False``), a `RankWarning` will be raised. This means that the coefficient values may be poorly determined. Fitting to a lower order polynomial will usually get rid of the warning (but may not be what you want, of course; if you have independent reason(s) for choosing the degree which isn't working, you may have to: a) reconsider those reasons, and/or b) reconsider the quality of your data). The `rcond` parameter can also be set to a value smaller than its default, but the resulting fit may be spurious and have large contributions from roundoff error. Polynomial fits using double precision tend to "fail" at about (polynomial) degree 20. Fits using Chebyshev or Legendre series are generally better conditioned, but much can still depend on the distribution of the sample points and the smoothness of the data. If the quality of the fit is inadequate, splines may be a good alternative. Examples -------- >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise" >>> c, stats = P.polyfit(x,y,3,full=True) >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1 array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) >>> stats # note the large SSR, explaining the rather poor results [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, 0.28853036]), 1.1324274851176597e-014] Same thing without the added noise >>> y = x**3 - x >>> c, stats = P.polyfit(x,y,3,full=True) >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16, 1.00000000e+00]) >>> stats # note the minuscule SSR [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, 0.50443316, 0.28853036]), 1.1324274851176597e-014] """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 deg = np.asarray([deg,], dtype=int).flatten() # check arguments. 
    if deg.size < 1:
        raise TypeError("expected deg to be one or more integers")
    if deg.min() < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    if deg.size == 1:
        restricted_fit = False
        lmax = deg[0]
        order = lmax + 1
    else:
        restricted_fit = True
        lmax = deg.max()
        order = deg.size

    # set up the least squares matrices in transposed form
    van = polyvander(x, lmax)
    if restricted_fit:
        van = van[:, deg]
    lhs = van.T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w

    # set rcond
    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps

    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    scl[scl == 0] =
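# Illustrative check (separate from the module code above): the polyvander3d
# docstring states the output shape is x.shape + (order,) with
# order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1).
import numpy as np
from numpy.polynomial.polynomial import polyvander3d

x = y = z = np.random.random(5)
v = polyvander3d(x, y, z, [1, 2, 3])
assert v.shape == (5, (1 + 1) * (2 + 1) * (3 + 1))  # (5, 24)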
= _messages.StringField(6) resourceVersion = _messages.StringField(7) watch = _messages.BooleanField(8) class AnthoseventsCustomresourcedefinitionsGetRequest(_messages.Message): r"""A AnthoseventsCustomresourcedefinitionsGetRequest object. Fields: name: The name of the CustomResourceDefinition being retrieved. If needed, replace {namespace_id} with the project ID. """ name = _messages.StringField(1, required=True) class AnthoseventsCustomresourcedefinitionsListRequest(_messages.Message): r"""A AnthoseventsCustomresourcedefinitionsListRequest object. Fields: continue_: Optional encoded string to continue paging. fieldSelector: Allows to filter resources based on a specific value for a field name. Send this in a query string format. i.e. 'metadata.name%3Dlorem'. Not currently used by Cloud Run. includeUninitialized: Not currently used by Cloud Run. labelSelector: Allows to filter resources based on a label. Supported operations are =, !=, exists, in, and notIn. limit: A integer attribute. parent: The project ID or project number from which the storages should be listed. resourceVersion: The baseline resource version from which the list or watch operation should start. Not currently used by Cloud Run. watch: Flag that indicates that the client expects to watch this resource as well. Not currently used by Cloud Run. """ continue_ = _messages.StringField(1) fieldSelector = _messages.StringField(2) includeUninitialized = _messages.BooleanField(3) labelSelector = _messages.StringField(4) limit = _messages.IntegerField(5, variant=_messages.Variant.INT32) parent = _messages.StringField(6) resourceVersion = _messages.StringField(7) watch = _messages.BooleanField(8) class AnthoseventsNamespacesBrokersCreateRequest(_messages.Message): r"""A AnthoseventsNamespacesBrokersCreateRequest object. Fields: broker: A Broker resource to be passed as the request body. parent: The namespace name. """ broker = _messages.MessageField('Broker', 1) parent = _messages.StringField(2, required=True) class AnthoseventsNamespacesBrokersDeleteRequest(_messages.Message): r"""A AnthoseventsNamespacesBrokersDeleteRequest object. Fields: name: The relative name of the broker being deleted, including the namespace """ name = _messages.StringField(1, required=True) class AnthoseventsNamespacesBrokersGetRequest(_messages.Message): r"""A AnthoseventsNamespacesBrokersGetRequest object. Fields: name: The name of the Broker being retrieved. """ name = _messages.StringField(1, required=True) class AnthoseventsNamespacesBrokersListRequest(_messages.Message): r"""A AnthoseventsNamespacesBrokersListRequest object. Fields: continue_: Optional encoded string to continue paging. fieldSelector: Allows to filter resources based on a specific value for a field name. Send this in a query string format. i.e. 'metadata.name%3Dlorem'. Not currently used by Cloud Run. includeUninitialized: Not currently used by Cloud Run. labelSelector: Allows to filter resources based on a label. Supported operations are =, !=, exists, in, and notIn. limit: A integer attribute. parent: The namespace name. resourceVersion: The baseline resource version from which the list or watch operation should start. Not currently used by Cloud Run. watch: Flag that indicates that the client expects to watch this resource as well. Not currently used by Cloud Run. 
""" continue_ = _messages.StringField(1) fieldSelector = _messages.StringField(2) includeUninitialized = _messages.BooleanField(3) labelSelector = _messages.StringField(4) limit = _messages.IntegerField(5, variant=_messages.Variant.INT32) parent = _messages.StringField(6, required=True) resourceVersion = _messages.StringField(7) watch = _messages.BooleanField(8) class AnthoseventsNamespacesCloudauditlogssourcesCreateRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudauditlogssourcesCreateRequest object. Fields: cloudAuditLogsSource: A CloudAuditLogsSource resource to be passed as the request body. parent: The namespace name. """ cloudAuditLogsSource = _messages.MessageField('CloudAuditLogsSource', 1) parent = _messages.StringField(2, required=True) class AnthoseventsNamespacesCloudauditlogssourcesDeleteRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudauditlogssourcesDeleteRequest object. Fields: apiVersion: Cloud Run currently ignores this parameter. kind: Cloud Run currently ignores this parameter. name: The name of the cloudauditlogssource being deleted. If needed, replace {namespace_id} with the project ID. propagationPolicy: Specifies the propagation policy of delete. Cloud Run currently ignores this setting, and deletes in the background. Please see kubernetes.io/docs/concepts/workloads/controllers/garbage- collection/ for more information. """ apiVersion = _messages.StringField(1) kind = _messages.StringField(2) name = _messages.StringField(3, required=True) propagationPolicy = _messages.StringField(4) class AnthoseventsNamespacesCloudauditlogssourcesGetRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudauditlogssourcesGetRequest object. Fields: name: The name of the cloudauditlogssource being retrieved. If needed, replace {namespace_id} with the project ID. """ name = _messages.StringField(1, required=True) class AnthoseventsNamespacesCloudauditlogssourcesListRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudauditlogssourcesListRequest object. Fields: continue_: Optional encoded string to continue paging. fieldSelector: Allows to filter resources based on a specific value for a field name. Send this in a query string format. i.e. 'metadata.name%3Dlorem'. Not currently used by Cloud Run. includeUninitialized: Not currently used by Cloud Run. labelSelector: Allows to filter resources based on a label. Supported operations are =, !=, exists, in, and notIn. limit: The maximum number of records that should be returned. parent: The namespaces name resourceVersion: The baseline resource version from which the list or watch operation should start. Not currently used by Cloud Run. watch: Flag that indicates that the client expects to watch this resource as well. Not currently used by Cloud Run. """ continue_ = _messages.StringField(1) fieldSelector = _messages.StringField(2) includeUninitialized = _messages.BooleanField(3) labelSelector = _messages.StringField(4) limit = _messages.IntegerField(5, variant=_messages.Variant.INT32) parent = _messages.StringField(6, required=True) resourceVersion = _messages.StringField(7) watch = _messages.BooleanField(8) class AnthoseventsNamespacesCloudauditlogssourcesReplaceCloudAuditLogsSourceRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudauditlogssourcesReplaceCloudAuditLogsSource Request object. Fields: cloudAuditLogsSource: A CloudAuditLogsSource resource to be passed as the request body. name: The name of the cloudauditlogssource being retrieved. 
If needed, replace {namespace_id} with the project ID. """ cloudAuditLogsSource = _messages.MessageField('CloudAuditLogsSource', 1) name = _messages.StringField(2, required=True) class AnthoseventsNamespacesCloudpubsubsourcesCreateRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudpubsubsourcesCreateRequest object. Fields: cloudPubSubSource: A CloudPubSubSource resource to be passed as the request body. parent: The namespace in which this cloudpubsubsource should be created. """ cloudPubSubSource = _messages.MessageField('CloudPubSubSource', 1) parent = _messages.StringField(2, required=True) class AnthoseventsNamespacesCloudpubsubsourcesDeleteRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudpubsubsourcesDeleteRequest object. Fields: apiVersion: Cloud Run currently ignores this parameter. kind: Cloud Run currently ignores this parameter. name: The name of the cloudpubsubsource being deleted. If needed, replace {namespace_id} with the project ID. propagationPolicy: Specifies the propagation policy of delete. Cloud Run currently ignores this setting, and deletes in the background. Please see kubernetes.io/docs/concepts/workloads/controllers/garbage- collection/ for more information. """ apiVersion = _messages.StringField(1) kind = _messages.StringField(2) name = _messages.StringField(3, required=True) propagationPolicy = _messages.StringField(4) class AnthoseventsNamespacesCloudpubsubsourcesGetRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudpubsubsourcesGetRequest object. Fields: name: The name of the cloudpubsubsource being retrieved. If needed, replace {namespace_id} with the project ID. """ name = _messages.StringField(1, required=True) class AnthoseventsNamespacesCloudpubsubsourcesListRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudpubsubsourcesListRequest object. Fields: continue_: Optional encoded string to continue paging. fieldSelector: Allows to filter resources based on a specific value for a field name. Send this in a query string format. i.e. 'metadata.name%3Dlorem'. Not currently used by Cloud Run. includeUninitialized: Not currently used by Cloud Run. labelSelector: Allows to filter resources based on a label. Supported operations are =, !=, exists, in, and notIn. limit: The maximum number of records that should be returned. parent: The namespace from which the cloudpubsubsources should be listed. resourceVersion: The baseline resource version from which the list or watch operation should start. Not currently used by Cloud Run. watch: Flag that indicates that the client expects to watch this resource as well. Not currently used by Cloud Run. """ continue_ = _messages.StringField(1) fieldSelector = _messages.StringField(2) includeUninitialized = _messages.BooleanField(3) labelSelector = _messages.StringField(4) limit = _messages.IntegerField(5, variant=_messages.Variant.INT32) parent = _messages.StringField(6, required=True) resourceVersion = _messages.StringField(7) watch = _messages.BooleanField(8) class AnthoseventsNamespacesCloudpubsubsourcesReplaceCloudPubSubSourceRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudpubsubsourcesReplaceCloudPubSubSourceRequest object. Fields: cloudPubSubSource: A CloudPubSubSource resource to be passed as the request body. name: The name of the cloudpubsubsource being retrieved. If needed, replace {namespace_id} with the project ID. 
""" cloudPubSubSource = _messages.MessageField('CloudPubSubSource', 1) name = _messages.StringField(2, required=True) class AnthoseventsNamespacesCloudschedulersourcesCreateRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudschedulersourcesCreateRequest object. Fields: cloudSchedulerSource: A CloudSchedulerSource resource to be passed as the request body. parent: The namespace in which this cloudschedulersource should be created. """ cloudSchedulerSource = _messages.MessageField('CloudSchedulerSource', 1) parent = _messages.StringField(2, required=True) class AnthoseventsNamespacesCloudschedulersourcesDeleteRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudschedulersourcesDeleteRequest object. Fields: apiVersion: Cloud Run currently ignores this parameter. kind: Cloud Run currently ignores this parameter. name: The name of the cloudschedulersource being deleted. If needed, replace {namespace_id} with the project ID. propagationPolicy: Specifies the propagation policy of delete. Cloud Run currently ignores this setting, and deletes in the background. Please see kubernetes.io/docs/concepts/workloads/controllers/garbage- collection/ for more information. """ apiVersion = _messages.StringField(1) kind = _messages.StringField(2) name = _messages.StringField(3, required=True) propagationPolicy = _messages.StringField(4) class AnthoseventsNamespacesCloudschedulersourcesGetRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudschedulersourcesGetRequest object. Fields: name: The name of the cloudschedulersource being retrieved. If needed, replace {namespace_id} with the project ID. """ name = _messages.StringField(1, required=True) class AnthoseventsNamespacesCloudschedulersourcesListRequest(_messages.Message): r"""A AnthoseventsNamespacesCloudschedulersourcesListRequest object. Fields: continue_: Optional encoded string to continue paging. fieldSelector: Allows to filter resources based on a specific value for a field name. Send this in a query string format. i.e. 'metadata.name%3Dlorem'. Not currently used by Cloud Run. includeUninitialized: Not currently used by Cloud Run. labelSelector: Allows to filter resources based on a label. Supported operations are =, !=, exists, in, and notIn. limit: The maximum number of records that should be returned. parent: The namespace from which the cloudschedulersources should be listed. resourceVersion: The baseline resource version from which the list or watch operation should start. Not currently used by
"""The Basic Model Interface.""" __version__ = '0.2' class BmiBase(object): """Methods that control model execution. These BMI functions are critical to plug-and-play modeling because they give a calling component fine-grained control over the model execution. """ def initialize(self, filename): """Perform startup tasks for the model. Perform all tasks that take place before entering the model's time loop, including opening files and initializing the model state. Model inputs are read from a text-based configuration file, specified by `filename`. Parameters ---------- filename : str, optional The path to the model configuration file. Notes ----- Models should be refactored, if necessary, to use a configuration file. CSDMS does not impose any constraint on how configuration files are formatted, although YAML is recommended. A template of a model's configuration file with placeholder values is used by the BMI. """ pass def update(self): """Advance model state by one time step. Perform all tasks that take place within one pass through the model's time loop. This typically includes incrementing all of the model's state variables. If the model's state variables don't change in time, then they can be computed by the :func:`initialize` method and this method can return with no action. """ pass def update_until(self, time): """Advance model state until the given time. Parameters ---------- time : float A model time value. See Also -------- update """ pass def update_frac(self, time_frac): """Advance model state by a fraction of a time step. Parameters ---------- time_frac : float A fraction of a model time step value. See Also -------- update """ pass def finalize(self): """Perform tear-down tasks for the model. Perform all tasks that take place after exiting the model's time loop. This typically includes deallocating memory, closing files, and printing reports. """ pass class BmiInfo(object): """Methods that get metadata about a model.""" def get_component_name(self): """Name of the component. Returns ------- str The name of the component. """ pass def get_input_var_names(self): """List of a model's input variables. Input variable names must be `CSDMS Standard Names`_, also known as *long variable names*. Returns ------- tuple of str The input variables for the model. Notes ----- Standard Names enable the CSDMS framework to determine whether an input variable in one model is equivalent to, or compatible with, an output variable in another model. This allows the framework to automatically connect components. Standard Names do not have to be used within the model. .. _CSDMS Standard Names: http://csdms.colorado.edu/wiki/CSDMS_Standard_Names """ pass def get_output_var_names(self): """List of a model's output variables. Output variable names must be `CSDMS Standard Names`_, also known as *long variable names*. Returns ------- tuple of str The output variables for the model. See Also -------- get_input_var_names Notes ----- .. _CSDMS Standard Names: http://csdms.colorado.edu/wiki/CSDMS_Standard_Names """ pass class BmiTime(object): """Methods that get time information from a model.""" def get_start_time(self): """Start time of the model. Model times should be of type float. The default model start time is 0. Returns ------- float The model start time. """ pass def get_current_time(self): """Current time of the model. Returns ------- float The current model time. See Also -------- get_start_time """ pass def get_end_time(self): """End time of the model. Returns ------- float The maximum model time. 
See Also -------- get_start_time """ pass def get_time_step(self): """Current time step of the model. The model time step should be of type float. The default time step is 1.0. Returns ------- float The time step used in model. """ pass def get_time_units(self): """Time units of the model. Returns ------- float The model time unit; e.g., `days` or `s`. Notes ----- CSDMS uses the `UDUNITS`_ package developed by Unidata. .. _UDUNITS: https://www.unidata.ucar.edu/software/udunits """ pass class BmiVars(object): """Methods that get information about input and output variables. These BMI functions obtain information about a particular input or output variable. They must accommodate any variable that is returned by the BMI methods :func:`~bmi.info.BmiInfo.get_input_var_names` or :func:`~bmi.info.BmiInfo.get_output_var_names`. """ def get_var_type(self, var_name): """Get data type of the given variable. Parameters ---------- var_name : str An input or output variable name, a CSDMS Standard Name. Returns ------- str The Python variable type; e.g., ``str``, ``int``, ``float``. """ pass def get_var_units(self, var_name): """Get units of the given variable. Standard unit names, in lower case, should be used, such as ``meters`` or ``seconds``. Standard abbreviations, like ``m`` for meters, are also supported. For variables with compound units, each unit name is separated by a single space, with exponents other than 1 placed immediately after the name, as in ``m s-1`` for velocity, ``W m-2`` for an energy flux, or ``km2`` for an area. Parameters ---------- var_name : str An input or output variable name, a CSDMS Standard Name. Returns ------- str The variable units. Notes ----- CSDMS uses the `UDUNITS`_ package from Unidata. .. _UDUNITS: http://www.unidata.ucar.edu/software/udunits """ pass def get_var_itemsize(self, var_name): """Get memory use for each array element, in bytes. Parameters ---------- var_name : str An input or output variable name, a CSDMS Standard Name. Returns ------- int Item size in bytes. """ pass def get_var_nbytes(self, var_name): """Get size, in bytes, of the given variable. Parameters ---------- var_name : str An input or output variable name, a CSDMS Standard Name. Returns ------- int The size of the variable, counted in bytes. """ pass def get_var_grid(self, var_name): """Get the grid identifier for the given variable. Parameters ---------- var_name : str An input or output variable name, a CSDMS Standard Name. Returns ------- int The grid identifier. See Also -------- bmi.info.BmiInfo.get_input_var_names : Get *var_name* from this method or from :func:`~bmi.info.BmiInfo.get_output_var_names`. """ pass class BmiGetter(object): """Get variable values from a model. Methods that get variables from a model's state. Often a model's state variables are changing with each time step, so getters are called to get current values. """ def get_value(self, var_name): """Get a copy of the values of the given variable. This is a getter for the model, used to access the model's current state. It returns a *copy* of a model variable, with the return type, size and rank dependent on the variable. Parameters ---------- var_name : str An input or output variable name, a CSDMS Standard Name. Returns ------- array_like The value of a model variable. """ pass def get_value_ref(self, var_name): """Get a reference to the values of the given variable. This is a getter for the model, used to access the model's current state. 
It returns a *reference* to a model variable, with the return type, size and rank dependent on the variable. Parameters ---------- var_name : str An input or output variable name, a CSDMS Standard Name. Returns ------- array_like A reference to a model variable. """ pass def get_value_at_indices(self, var_name, indices): """Get values at particular locations. Parameters ---------- var_name : str An input or output variable name, a CSDMS Standard Name. indices : array_like The indices into the variable array. Returns ------- array_like Value of the model variable at the given location. """ pass class BmiSetter(object): """Set values into a component. Methods that set variables of a model's state. """ def set_value(self, var_name, src): """Specify a new value for a model variable. This is the setter for the model, used to change the model's current state. It accepts, through *src*, a new value for a model variable, with the type, size and rank of *src* dependent on the variable. Parameters ---------- var_name : str An input or output variable name, a CSDMS Standard Name. src : array_like The new value for the specified variable. """ pass def set_value_at_indices(self, var_name, indices, src): """Specify a new value for a model variable at particular indices. Parameters ---------- var_name : str An input or output variable name, a CSDMS Standard Name. indices : array_like The indices into the variable array. src : array_like The new value for the specified variable. """ pass class BmiGrid(object): """Methods that describe a grid. """ def get_grid_rank(self, grid_id): """Get number of dimensions of the computational grid.
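# A minimal, illustrative sketch (not part of the interface definition above)
# of how a hypothetical toy model with a fixed step might implement the
# BmiTime methods.
class _ToyClock(BmiTime):
    """Fixed-step clock for a hypothetical toy model."""

    def __init__(self, end_time=10.0, time_step=1.0):
        self._time = 0.0
        self._end_time = end_time
        self._time_step = time_step

    def get_start_time(self):
        return 0.0

    def get_current_time(self):
        return self._time

    def get_end_time(self):
        return self._end_time

    def get_time_step(self):
        return self._time_step

    def get_time_units(self):
        return 's'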
= True else: self._alphabet_list = alphabet if probabilities: probabilities[0] += (1.0 - sum(probabilities)) self._alphabet = list(self._alphabet_list) self._probabilities = probabilities @classmethod def build(cls, builder: Builder, spec): alphabet = None probabilities = None if isinstance(spec, (tuple, list)): len_spec = spec[0] if len(spec) > 1: alphabet = spec[1] if len(spec) > 2: probabilities = spec[1] else: len_spec = spec return cls(builder.rand, BuildGenerator(builder, len_spec), alphabet, probabilities) def generate(self, size: Optional[int] = None, key: Optional[str] = None, context: Optional[ContextBase] = None) -> Dict[Any, Any]: str_len = self._len_generator.generate(None, key, context) if size is None: return ''.join( self._rand.choice(self._alphabet, str_len, p=self._probabilities)) else: letters = self._rand.choice(self._alphabet, [size, str_len], p=self._probabilities) return [''.join(s) for s in letters] def result_type(self) -> type: return str def save(self) -> Any: if self._using_defaults: return ('str', (self._len_generator.save(),)) return (Names.STR, (self._len_generator.save(), self._alphabet_list, self._probabilities)) class ArrayGenerator(Generator): """Generator for list of values of value_generator, with a length.""" def __init__(self, rand: numpy.random.Generator, value_generator: Generator, len_generator: Generator): """Initializes an array generator. Args: value_generator: generates the underlying values in the array. len_generator: generates the length of the output array. On None generated by this, our output is None. """ super().__init__(rand) if not _IsOptionalType(len_generator.result_type(), int): raise ValueError( 'ArrayGenerator needs an integer length generator. ' f'Got: {len_generator.result_type()}') self._value_generator = value_generator self._len_generator = len_generator @classmethod def build(cls, builder: Builder, spec): if not isinstance(spec, (tuple, list)) or len(spec) != 2: raise ValueError(f'{cls} expects two underlying arguments') return cls(builder.rand, BuildGenerator(builder, spec[0]), BuildGenerator(builder, spec[1])) def generate(self, size: Optional[int] = None, key: Optional[str] = None, context: Optional[ContextBase] = None) -> List[Any]: array_len = self._len_generator.generate(size, key, context) if array_len is None: return None if size is None: return self._value_generator.generate(array_len, key, context) else: return [ self._value_generator.generate(sz, key, context) for sz in array_len ] def _list_type(self) -> type: """Underlying list type.""" return List[self._value_generator.result_type()] def result_type(self) -> type: if _IsOptional(self._len_generator.result_type(),): return Optional[self._list_type()] return self._list_type() def save(self) -> Any: return (Names.ARRAY, (self._value_generator.save(), self._len_generator.save())) class PerElementArrayGenerator(ArrayGenerator): """Similar with ArrayGenerator, but the generation is done for every element. This is more expensive, but allows a choice to be made at each element, as opposed to one choice per each generated array. 
On the downside, when generating an array of arrays with this, by specifying a size parameter, that size is fixed.""" def generate(self, size: Optional[int] = None, key: Optional[str] = None, context: Optional[ContextBase] = None) -> List[Any]: array_len = self._len_generator.generate(None, key, context) if array_len is None: return None return [ self._value_generator.generate(size, key, context) for _ in range(array_len) ] def save(self) -> Any: return (Names.ELEM_ARRAY, (self._value_generator.save(), self._len_generator.save())) class MakeSetGenerator(Generator): """Converts the underlying generated list to a set.""" def __init__(self, rand: numpy.random.Generator, list_generator: Generator): """Makes a set out of a generated list.""" super().__init__(rand) if _GetStructuredTypeName(list_generator.result_type()) != 'list': raise ValueError( 'MakeSetGenerator expecting an underlying generator of ' f'a list. Got: {list_generator.result_type()}') self._list_generator = list_generator @classmethod def build(cls, builder: Builder, spec): return cls(builder.rand, BuildGenerator(builder, spec)) def generate(self, size: Optional[int] = None, key: Optional[str] = None, context: Optional[ContextBase] = None) -> Set[Any]: result = self._list_generator.generate(size, key, context) if result is None: return result elif size is None: return set(result) else: return [set(sub_list) for sub_list in result] def result_type(self) -> type: return Set[self._list_generator.result_type().__args__[0]] def save(self) -> Any: return (Names.SET, self._list_generator.save()) class DictGenerator(Generator): """Generates random dictionaries with len, keys and values as chosen.""" def __init__(self, rand: numpy.random.Generator, key_generator: Generator, value_generator: Generator, len_generator: Generator): """Generates a dictionary of length determined by len_generator, """ super().__init__(rand) if not _IsOptionalType(len_generator.result_type(), int): raise ValueError('DictGenerator needs an integer length generator. 
' f'Got: {len_generator}') self._key_generator = key_generator self._value_generator = value_generator self._len_generator = len_generator @classmethod def build(cls, builder: Builder, spec): if not isinstance(spec, (tuple, list)) or len(spec) != 3: raise ValueError(f'{cls} expects two underlying arguments') return cls(builder.rand, BuildGenerator(builder, spec[0]), BuildGenerator(builder, spec[1]), BuildGenerator(builder, spec[2])) def generate(self, size: Optional[int] = None, key: Optional[str] = None, context: Optional[ContextBase] = None) -> Dict[Any, Any]: if size is None: return self._generate_dict(key, context) return [self._generate_dict(key, context) for _ in range(size)] def _generate_dict(self, key, context) -> Dict[Any, Any]: dict_len = self._len_generator.generate(None, key, context) if dict_len is None: return None return dict( zip(self._key_generator.generate(dict_len, key, context), self._value_generator.generate(dict_len, key, context))) def _dict_type(self) -> type: """Underlying returned dictionary type.""" return Dict[self._key_generator.result_type(), self._value_generator.result_type()] def result_type(self) -> type: """Top level returned type, maybe optional.""" if _IsOptional(self._len_generator.result_type()): return Optional[self._dict_type()] return self._dict_type() def save(self) -> Any: return (Names.DICT, (self._key_generator.save(), self._value_generator.save(), self._len_generator.save())) class CombineGenerator(Generator): """Combines two values from two underlying generators.""" def __init__(self, rand: numpy.random.Generator, first: Generator, second: Generator, name: str, combiner: Callable[[Any, Any], Any], first_types: List[type], second_types: List[type], out_type: type): super().__init__(rand) if not any(issubclass(first.result_type(), t) for t in first_types): raise ValueError( f'{type(self)} can be built with first argument generating ' f'{first_types}. Found: {first.result_type()}') if not any(issubclass(second.result_type(), t) for t in second_types): raise ValueError( f'{type(self)} can be built with second argument generating ' f'{second_types}. 
Found: {second.result_type()}') self._first = first self._second = second self._combiner = combiner self._name = name self._out_type = out_type def generate(self, size: Optional[int] = None, key: Optional[str] = None, context: Optional[ContextBase] = None) -> Dict[Any, Any]: if size is None: return self._combiner(self._first.generate(None, key, context), self._second.generate(None, key, context)) return [ self._combiner(f, s) for (f, s) in zip(self._first.generate(size, key, context), self._second.generate(size, key, context)) ] def result_type(self) -> type: return self._out_type def save(self) -> Any: return (self._name, (self._first.save(), self._second.save())) @classmethod def build(cls, builder: Builder, spec): if not isinstance(spec, (tuple, list)) or len(spec) != 2: raise ValueError(f'{cls} expects two underlying arguments') # pylint: disable=no-value-for-parameter return cls(builder.rand, BuildGenerator(builder, spec[0]), BuildGenerator(builder, spec[1])) class DateGenerator(CombineGenerator): """Generates dates with a start date, and a delta.""" def __init__(self, rand: numpy.random.Generator, start_date: Generator, delta_days: Generator): super().__init__( rand, start_date, delta_days, Names.DATE, lambda start, delta: start + datetime.timedelta(days=int(delta)), [datetime.date], [int], datetime.date) class DatetimeGenerator(CombineGenerator): """Generates datetimes with a start timestamp, and a delta in seconds.""" def __init__(self, rand: numpy.random.Generator, start_datetime: Generator, delta_seconds: Generator): super().__init__( rand, start_datetime, delta_seconds, Names.DATETIME, lambda start, delta: start + datetime.timedelta(seconds=int(delta)), [datetime.datetime], [int], datetime.datetime) class StrOpGenerator(Generator): """Combines strings generated by the children.""" def __init__(self, rand: numpy.random.Generator, children: List[Generator], op: str, name: str, in_types: List[type], combiner: Callable[[str, List[any]], str]): super().__init__(rand) if (in_types is not None and any( not any(issubclass(child.result_type(), t) for t in in_types) for child in children)): raise ValueError( f'{type(self)} expecting {in_types} input types. ' f'Got {[child.result_type() for child in children]}') if not isinstance(op, str): raise ValueError( f'{type(self)} expecting string operand. 
Got `{op}`') self._children = children self._op = op self._name = name self._combiner = combiner def result_type(self) -> type: return str def save(self) -> Any: return (self._name, ([child.save() for child in self._children], self._op)) @classmethod def build(cls, builder: Builder, spec): if not isinstance(spec, (tuple, list)) or len(spec) != 2: raise ValueError(f'{cls} expects two underlying arguments') # pylint: disable=no-value-for-parameter return cls(builder.rand, [BuildGenerator(builder, s) for s in spec[0]], spec[1]) def generate(self, size: Optional[int] = None, key: Optional[str] = None, context: Optional[ContextBase] = None) -> str: if size is None: return self._combiner(self._op, [ child.generate(None, key, context) for child in self._children ]) return [ self._combiner(self._op, elems) for elems in zip(*[ child.generate(size, key, context) for child in self._children ]) ] class ConcatGenerator(StrOpGenerator): def __init__(self, rand: numpy.random.Generator, children: List[Generator], concat: str): super().__init__(rand, children, concat, Names.CONCAT, [str], lambda op, elem: op.join(elem)) class FormatGenerator(StrOpGenerator): def __init__(self, rand: numpy.random.Generator, children: List[Generator], formatter: str): super().__init__(rand, children, formatter, Names.FORMAT, None, lambda op, elem: op.format(*elem)) def _GenKey(key: str, base: Optional[str]): if base: return base + '.' + key return key class RecordGenerator(Generator): """Base class for generating records-like objects: dicts, classes, dataframes.""" def __init__(self, rand: numpy.random.Generator, generators: Dict[str, Generator], name: str): super().__init__(rand) self._generators = generators self._name = name self._with_context = False def with_context(self): """Turns on context for generated values, based on records generated by this.""" self._with_context = True return self def nested(self, key: str): """Access the internal generator for provided key.""" if key in self._generators: return self._generators[key] return None @classmethod def build(cls, builder: Builder, spec): if not isinstance(spec, dict): raise ValueError( f'{cls} expecting a dict specification. Got: {type(spec)}') generators = {} for k, v in spec.items(): if not isinstance(k, str): raise ValueError(f'{cls} expecting string keys. Got `{k}`') generator = builder.build_for_key(k, v) builder.add_generator(k, generator) generators[k] = generator # pylint: disable=no-value-for-parameter return cls(builder.rand, generators) def save(self) -> Any: return (self._name, {k: v.save() for
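# Illustrative only: the single combination step that DateGenerator's
# combiner applies to one generated (start_date, delta_days) pair.
import datetime

start = datetime.date(2020, 1, 1)
delta_days = 42
assert start + datetime.timedelta(days=int(delta_days)) == datetime.date(2020, 2, 12)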
WHEN DELETE_ACTION = 1 THEN 'NO ACTION' WHEN DELETE_ACTION = 2 THEN 'CASCADE' ELSE 'SET NULL' END AS VARCHAR2(9)) AS DELETE_RULE, CASE WHEN A.ENABLE_FLAG = 1 THEN CAST('ENABLED' AS VARCHAR2(8)) ELSE CAST('DISABLED' AS VARCHAR2(8)) END AS STATUS, CAST('NOT DEFERRABLE' AS VARCHAR2(14)) AS DEFERRABLE, CAST('IMMEDIATE' AS VARCHAR2(9)) AS DEFERRED, CASE WHEN A.VALIDATE_FLAG = 1 THEN CAST('VALIDATED' AS VARCHAR2(13)) ELSE CAST('NOT VALIDATED' AS VARCHAR2(13)) END AS VALIDATED, CAST(NULL AS VARCHAR2(14)) AS "GENERATED", CAST(NULL AS VARCHAR2(3)) AS BAD, CASE WHEN A.RELY_FLAG = 1 THEN CAST('RELY' AS VARCHAR2(4)) ELSE CAST(NULL AS VARCHAR2(4)) END AS RELY, CAST(NULL AS DATE) AS LAST_CHANGE, CAST(NULL AS VARCHAR2(128)) AS INDEX_OWNER, CAST(NULL AS VARCHAR2(128)) AS INDEX_NAME, CAST(NULL AS VARCHAR2(7)) AS INVALID, CAST(NULL AS VARCHAR2(14)) AS VIEW_RELATED FROM SYS.ALL_VIRTUAL_FOREIGN_KEY_REAL_AGENT A, SYS.ALL_VIRTUAL_TABLE_REAL_AGENT B, SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT C WHERE A.CHILD_TABLE_ID = B.TABLE_ID AND B.DATABASE_ID = C.DATABASE_ID AND A.REF_CST_TYPE = 0 AND A.REF_CST_ID = -1 AND A.TENANT_ID = EFFECTIVE_TENANT_ID() AND B.TENANT_ID = EFFECTIVE_TENANT_ID() AND C.TENANT_ID = EFFECTIVE_TENANT_ID() AND (B.DATABASE_ID = USERENV('SCHEMAID') OR USER_CAN_ACCESS_OBJ(1, B.TABLE_ID, B.DATABASE_ID) = 1) UNION ALL SELECT CAST(C.DATABASE_NAME AS VARCHAR2(128)) AS OWNER, CAST(A.FOREIGN_KEY_NAME AS VARCHAR2(128)) AS CONSTRAINT_NAME, CAST('R' AS VARCHAR2(1)) AS CONSTRAINT_TYPE, CAST(B.TABLE_NAME AS VARCHAR2(128)) AS TABLE_NAME, CAST(NULL AS VARCHAR2(4000)) AS SEARCH_CONDITION, CAST(E.DATABASE_NAME AS VARCHAR2(128)) AS R_OWNER, CAST(CASE WHEN A.REF_CST_TYPE = 2 THEN SUBSTR(F.TABLE_NAME, 7 + INSTR(SUBSTR(F.TABLE_NAME, 7), '_')) ELSE NULL END AS VARCHAR2(128)) AS R_CONSTRAINT_NAME, CAST(CASE WHEN DELETE_ACTION = 1 THEN 'NO ACTION' WHEN DELETE_ACTION = 2 THEN 'CASCADE' ELSE 'SET NULL' END AS VARCHAR2(9)) AS DELETE_RULE, CASE WHEN A.ENABLE_FLAG = 1 THEN CAST('ENABLED' AS VARCHAR2(8)) ELSE CAST('DISABLED' AS VARCHAR2(8)) END AS STATUS, CAST('NOT DEFERRABLE' AS VARCHAR2(14)) AS DEFERRABLE, CAST('IMMEDIATE' AS VARCHAR2(9)) AS DEFERRED, CASE WHEN A.VALIDATE_FLAG = 1 THEN CAST('VALIDATED' AS VARCHAR2(13)) ELSE CAST('NOT VALIDATED' AS VARCHAR2(13)) END AS VALIDATED, CAST(NULL AS VARCHAR2(14)) AS "GENERATED", CAST(NULL AS VARCHAR2(3)) AS BAD, CASE WHEN A.RELY_FLAG = 1 THEN CAST('RELY' AS VARCHAR2(4)) ELSE CAST(NULL AS VARCHAR2(4)) END AS RELY, CAST(NULL AS DATE) AS LAST_CHANGE, CAST(NULL AS VARCHAR2(128)) AS INDEX_OWNER, CAST(NULL AS VARCHAR2(128)) AS INDEX_NAME, CAST(NULL AS VARCHAR2(7)) AS INVALID, CAST(NULL AS VARCHAR2(14)) AS VIEW_RELATED FROM SYS.ALL_VIRTUAL_FOREIGN_KEY_REAL_AGENT A, SYS.ALL_VIRTUAL_TABLE_REAL_AGENT B, SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT C, SYS.ALL_VIRTUAL_TABLE_REAL_AGENT D, SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT E, SYS.ALL_VIRTUAL_TABLE_REAL_AGENT F WHERE A.CHILD_TABLE_ID = B.TABLE_ID AND B.DATABASE_ID = C.DATABASE_ID AND A.PARENT_TABLE_ID = D.TABLE_ID AND D.DATABASE_ID = E.DATABASE_ID AND (D.DATABASE_ID = USERENV('SCHEMAID') OR USER_CAN_ACCESS_OBJ(1, D.TABLE_ID, D.DATABASE_ID) = 1) AND (A.REF_CST_ID = F.TABLE_ID AND A.REF_CST_TYPE = 2) AND A.TENANT_ID = EFFECTIVE_TENANT_ID() AND B.TENANT_ID = EFFECTIVE_TENANT_ID() AND C.TENANT_ID = EFFECTIVE_TENANT_ID() AND D.TENANT_ID = EFFECTIVE_TENANT_ID() AND E.TENANT_ID = EFFECTIVE_TENANT_ID() AND F.TENANT_ID = EFFECTIVE_TENANT_ID() UNION ALL SELECT CAST(C.DATABASE_NAME AS VARCHAR2(128)) AS OWNER, CAST(A.FOREIGN_KEY_NAME AS VARCHAR2(128)) AS 
CONSTRAINT_NAME, CAST('R' AS VARCHAR2(1)) AS CONSTRAINT_TYPE, CAST(B.TABLE_NAME AS VARCHAR2(128)) AS TABLE_NAME, CAST(NULL AS VARCHAR2(4000)) AS SEARCH_CONDITION, CAST(E.DATABASE_NAME AS VARCHAR2(128)) AS R_OWNER, CAST(CASE WHEN A.REF_CST_TYPE = 1 THEN F.CONSTRAINT_NAME ELSE NULL END AS VARCHAR2(128)) AS R_CONSTRAINT_NAME, CAST(CASE WHEN DELETE_ACTION = 1 THEN 'NO ACTION' WHEN DELETE_ACTION = 2 THEN 'CASCADE' ELSE 'SET NULL' END AS VARCHAR2(9)) AS DELETE_RULE, CASE WHEN A.ENABLE_FLAG = 1 THEN CAST('ENABLED' AS VARCHAR2(8)) ELSE CAST('DISABLED' AS VARCHAR2(8)) END AS STATUS, CAST('NOT DEFERRABLE' AS VARCHAR2(14)) AS DEFERRABLE, CAST('IMMEDIATE' AS VARCHAR2(9)) AS DEFERRED, CASE WHEN A.VALIDATE_FLAG = 1 THEN CAST('VALIDATED' AS VARCHAR2(13)) ELSE CAST('NOT VALIDATED' AS VARCHAR2(13)) END AS VALIDATED, CAST(NULL AS VARCHAR2(14)) AS "GENERATED", CAST(NULL AS VARCHAR2(3)) AS BAD, CASE WHEN A.RELY_FLAG = 1 THEN CAST('RELY' AS VARCHAR2(4)) ELSE CAST(NULL AS VARCHAR2(4)) END AS RELY, CAST(NULL AS DATE) AS LAST_CHANGE, CAST(NULL AS VARCHAR2(128)) AS INDEX_OWNER, CAST(NULL AS VARCHAR2(128)) AS INDEX_NAME, CAST(NULL AS VARCHAR2(7)) AS INVALID, CAST(NULL AS VARCHAR2(14)) AS VIEW_RELATED FROM SYS.ALL_VIRTUAL_FOREIGN_KEY_REAL_AGENT A, SYS.ALL_VIRTUAL_TABLE_REAL_AGENT B, SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT C, SYS.ALL_VIRTUAL_TABLE_REAL_AGENT D, SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT E, SYS.ALL_VIRTUAL_CONSTRAINT_REAL_AGENT F WHERE A.CHILD_TABLE_ID = B.TABLE_ID AND B.DATABASE_ID = C.DATABASE_ID AND A.PARENT_TABLE_ID = D.TABLE_ID AND D.DATABASE_ID = E.DATABASE_ID AND (D.DATABASE_ID = USERENV('SCHEMAID') OR USER_CAN_ACCESS_OBJ(1, D.TABLE_ID, D.DATABASE_ID) = 1) AND (A.PARENT_TABLE_ID = F.TABLE_ID AND A.REF_CST_TYPE = 1 AND F.CONSTRAINT_TYPE = 1 AND A.REF_CST_ID = F.CONSTRAINT_ID) AND A.TENANT_ID = EFFECTIVE_TENANT_ID() AND B.TENANT_ID = EFFECTIVE_TENANT_ID() AND C.TENANT_ID = EFFECTIVE_TENANT_ID() AND D.TENANT_ID = EFFECTIVE_TENANT_ID() AND E.TENANT_ID = EFFECTIVE_TENANT_ID() AND F.TENANT_ID = EFFECTIVE_TENANT_ID() UNION ALL SELECT CAST(C.DATABASE_NAME AS VARCHAR2(128)) AS OWNER, CAST(CONSTRAINT_NAME AS VARCHAR2(128)) AS CONSTRAINT_NAME, CASE WHEN A.CONSTRAINT_TYPE = 1 THEN CAST('P' AS VARCHAR2(1)) ELSE CAST('C' AS VARCHAR2(1)) END AS CONSTRAINT_TYPE, CAST(B.TABLE_NAME AS VARCHAR2(128)) AS TABLE_NAME, CASE WHEN A.CONSTRAINT_TYPE = 1 THEN CAST(NULL AS VARCHAR2(4000)) ELSE CAST(A.CHECK_EXPR AS VARCHAR2(4000)) END AS SEARCH_CONDITION, CAST(NULL AS VARCHAR2(128)) AS R_OWNER, CAST(NULL AS VARCHAR2(128)) AS R_CONSTRAINT_NAME, CAST(NULL AS VARCHAR2(9)) AS DELETE_RULE, CASE WHEN A.ENABLE_FLAG = 1 THEN CAST('ENABLED' AS VARCHAR2(8)) ELSE CAST('DISABLED' AS VARCHAR2(8)) END AS STATUS, CAST('NOT DEFERRABLE' AS VARCHAR2(14)) AS DEFERRABLE, CAST('IMMEDIATE' AS VARCHAR2(9)) AS DEFERRED, CASE WHEN A.VALIDATE_FLAG = 1 THEN CAST('VALIDATED' AS VARCHAR2(13)) ELSE CAST('NOT VALIDATED' AS VARCHAR2(13)) END AS VALIDATED, CAST(NULL AS VARCHAR2(14)) AS "GENERATED", CAST(NULL AS VARCHAR2(3)) AS BAD, CASE WHEN A.RELY_FLAG = 1 THEN CAST('RELY' AS VARCHAR2(4)) ELSE CAST(NULL AS VARCHAR2(4)) END AS RELY, CAST(NULL AS DATE) AS LAST_CHANGE, CAST(C.DATABASE_NAME AS VARCHAR2(128)) AS INDEX_OWNER, CASE WHEN A.CONSTRAINT_TYPE = 1 THEN CAST(A.CONSTRAINT_NAME AS VARCHAR2(128)) ELSE CAST(NULL AS VARCHAR2(128)) END AS INDEX_NAME, CAST(NULL AS VARCHAR2(7)) AS INVALID, CAST(NULL AS VARCHAR2(14)) AS VIEW_RELATED FROM SYS.ALL_VIRTUAL_CONSTRAINT_REAL_AGENT A, SYS.ALL_VIRTUAL_TABLE_REAL_AGENT B, SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT C WHERE A.TABLE_ID = 
B.TABLE_ID AND B.DATABASE_ID = C.DATABASE_ID AND (B.DATABASE_ID = USERENV('SCHEMAID') OR USER_CAN_ACCESS_OBJ(1, B.TABLE_ID, B.DATABASE_ID) = 1) AND C.DATABASE_NAME != '__recyclebin' AND A.TENANT_ID = EFFECTIVE_TENANT_ID() AND B.TENANT_ID = EFFECTIVE_TENANT_ID() AND C.TENANT_ID = EFFECTIVE_TENANT_ID() """.replace("\n", " ") ) def_table_schema( table_name = 'USER_CONSTRAINTS', database_id = 'OB_ORA_SYS_DATABASE_ID', table_id = '25017', table_type = 'SYSTEM_VIEW', rowkey_columns = [], normal_columns = [], gm_columns = [], in_tenant_space = True, view_definition = """ SELECT CAST(C.DATABASE_NAME AS VARCHAR2(128)) AS OWNER, CAST(SUBSTR(A.TABLE_NAME, 7 + INSTR(SUBSTR(A.TABLE_NAME, 7), '_')) AS VARCHAR2(128)) AS CONSTRAINT_NAME, CAST('U' AS VARCHAR2(1)) AS CONSTRAINT_TYPE, CAST(B.TABLE_NAME AS VARCHAR2(128)) AS TABLE_NAME, CAST(NULL AS VARCHAR2(4000)) AS SEARCH_CONDITION, CAST(NULL AS VARCHAR2(128)) AS R_OWNER, CAST(NULL AS VARCHAR2(128)) AS R_CONSTRAINT_NAME, CAST(NULL AS VARCHAR2(9)) AS DELETE_RULE, CAST('ENABLED' AS VARCHAR2(8)) AS STATUS, CAST('NOT DEFERRABLE' AS VARCHAR2(14)) AS DEFERRABLE, CAST('IMMEDIATE' AS VARCHAR2(9)) AS DEFERRED, CAST('VALIDATED' AS VARCHAR2(13)) AS VALIDATED, CAST(NULL AS VARCHAR2(14)) AS "GENERATED", CAST(NULL AS VARCHAR2(3)) AS BAD, CAST(NULL AS VARCHAR2(4)) AS RELY, CAST(NULL AS DATE) AS LAST_CHANGE, CAST(C.DATABASE_NAME AS VARCHAR2(128)) AS INDEX_OWNER, CAST(SUBSTR(A.TABLE_NAME, 7 + INSTR(SUBSTR(A.TABLE_NAME, 7), '_')) AS VARCHAR2(128)) AS INDEX_NAME, CAST(NULL AS VARCHAR2(7)) AS INVALID, CAST(NULL AS VARCHAR2(14)) AS VIEW_RELATED FROM SYS.ALL_VIRTUAL_TABLE_REAL_AGENT A, SYS.ALL_VIRTUAL_TABLE_REAL_AGENT B, SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT C WHERE A.DATA_TABLE_ID = B.TABLE_ID AND A.DATABASE_ID = C.DATABASE_ID AND A.DATABASE_ID = USERENV('SCHEMAID') AND A.INDEX_TYPE IN (2, 4, 8) AND A.TENANT_ID = EFFECTIVE_TENANT_ID() AND B.TENANT_ID = EFFECTIVE_TENANT_ID() AND C.TENANT_ID = EFFECTIVE_TENANT_ID() UNION ALL SELECT CAST(C.DATABASE_NAME AS VARCHAR2(128)) AS OWNER, CAST(A.FOREIGN_KEY_NAME AS VARCHAR2(128)) AS CONSTRAINT_NAME, CAST('R' AS VARCHAR2(1)) AS CONSTRAINT_TYPE, CAST(B.TABLE_NAME AS VARCHAR2(128)) AS TABLE_NAME, CAST(NULL AS VARCHAR2(4000)) AS SEARCH_CONDITION, CAST(C.DATABASE_NAME AS VARCHAR2(128)) AS R_OWNER, CAST(NULL AS VARCHAR2(128)) AS R_CONSTRAINT_NAME, CAST(CASE WHEN DELETE_ACTION = 1 THEN 'NO ACTION' WHEN DELETE_ACTION = 2 THEN 'CASCADE' ELSE 'SET NULL' END AS VARCHAR2(9)) AS DELETE_RULE, CASE WHEN A.ENABLE_FLAG = 1 THEN CAST('ENABLED' AS VARCHAR2(8)) ELSE CAST('DISABLED' AS VARCHAR2(8)) END AS STATUS, CAST('NOT DEFERRABLE' AS VARCHAR2(14)) AS DEFERRABLE, CAST('IMMEDIATE' AS VARCHAR2(9)) AS DEFERRED, CASE WHEN A.VALIDATE_FLAG = 1 THEN CAST('VALIDATED' AS VARCHAR2(13)) ELSE CAST('NOT VALIDATED' AS VARCHAR2(13)) END AS VALIDATED, CAST(NULL AS VARCHAR2(14)) AS "GENERATED", CAST(NULL AS VARCHAR2(3)) AS BAD, CAST(NULL AS VARCHAR2(4)) AS RELY, CAST(NULL AS DATE) AS LAST_CHANGE, CAST(NULL AS VARCHAR2(128)) AS INDEX_OWNER, CAST(NULL AS VARCHAR2(128)) AS INDEX_NAME, CAST(NULL AS VARCHAR2(7)) AS INVALID, CAST(NULL AS VARCHAR2(14)) AS VIEW_RELATED FROM SYS.ALL_VIRTUAL_FOREIGN_KEY_REAL_AGENT A, SYS.ALL_VIRTUAL_TABLE_REAL_AGENT B, SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT C WHERE A.CHILD_TABLE_ID = B.TABLE_ID AND B.DATABASE_ID = C.DATABASE_ID AND A.REF_CST_TYPE = 0 AND A.REF_CST_ID = -1 AND B.DATABASE_ID = USERENV('SCHEMAID') AND A.TENANT_ID = EFFECTIVE_TENANT_ID() AND B.TENANT_ID = EFFECTIVE_TENANT_ID() AND C.TENANT_ID = EFFECTIVE_TENANT_ID() UNION ALL SELECT 
CAST(C.DATABASE_NAME AS VARCHAR2(128)) AS OWNER, CAST(A.FOREIGN_KEY_NAME AS VARCHAR2(128)) AS CONSTRAINT_NAME, CAST('R' AS VARCHAR2(1)) AS CONSTRAINT_TYPE, CAST(B.TABLE_NAME AS VARCHAR2(128)) AS TABLE_NAME, CAST(NULL AS VARCHAR2(4000)) AS SEARCH_CONDITION, CAST(E.DATABASE_NAME AS VARCHAR2(128)) AS R_OWNER, CAST(CASE WHEN A.REF_CST_TYPE = 2 THEN SUBSTR(F.TABLE_NAME, 7 + INSTR(SUBSTR(F.TABLE_NAME, 7), '_')) ELSE NULL END AS VARCHAR2(128)) AS R_CONSTRAINT_NAME, CAST(CASE WHEN DELETE_ACTION = 1 THEN 'NO ACTION' WHEN DELETE_ACTION =
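# Illustrative only: each view_definition above is a triple-quoted SQL string
# that is flattened to a single line before registration, exactly as the
# .replace("\n", " ") call on the earlier definition shows.
example_view_sql = """
  SELECT CAST('ENABLED' AS VARCHAR2(8)) AS STATUS
    FROM SYS.ALL_VIRTUAL_TABLE_REAL_AGENT
""".replace("\n", " ")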
<reponame>stolau/oty_ilmo from werkzeug.security import generate_password_hash, check_password_hash from flask import Flask, render_template, url_for, redirect, request, flash, send_from_directory, session from flask_httpauth import HTTPBasicAuth from app import app, db from flask_mail import Mail, Message from datetime import datetime, date, time, timedelta import requests from sqlalchemy import and_ from app.forms import ots2021Form, fuksisitsitForm, tupsufuksisitsitForm, fuksibeerpongForm, beerpongForm, aitiForm, hvsitsituusiForm, syysuitotForm, prositsitForm, simailutForm, kotiruokakurssi1Form, kotiruokakurssi2Form from app.models import ots2021Model, fuksisitsitModel, tupsufuksisitsitModel, fuksibeerpongModel, beerpongModel, aitiModel, hvsitsituusiModel, syysuitotModel, prositsitModel, simailutModel, kotiruokakurssi1Model, kotiruokakurssi2Model from flask_wtf.csrf import CSRFProtect, CSRFError import os from app import sqlite_to_csv from werkzeug.datastructures import MultiDict from urllib.parse import urlparse, urlunparse import subprocess import time import json auth = HTTPBasicAuth() csrf = CSRFProtect() try: file = open("auth.conf", "r") lines = file.readlines() password = <PASSWORD>) print("created default user") print("username: admin") print("password: " + str(password)) users = { "admin": generate_password_hash(password) } roles = { "admin": "admin" } for line in lines: new_user = line.split(",", 6) users[new_user[0]] = generate_password_hash(new_user[1]) roles[new_user[0]] = new_user[2:6] except FileNotFoundError as e: print(e) password = os.urandom(64) print("auth.conf not found, created default user") print("username: admin") print("password: " + str(password)) users = { "admin": generate_password_hash(password) } roles = { "admin": "admin" } else: file.close() KAPSI = False try: file = open("routes.conf", "r") lines = file.readlines() for line in lines: conf_line = line.split(":", 2) if "kapsi" in conf_line[0]: if "true" in conf_line[1]: KAPSI = True else: KAPSI = False except FileNotFoundError as e: print(e) print("routes.conf not found") else: file.close() GMAIL = True if GMAIL: mail_settings = { "MAIL_SERVER": 'smtp.gmail.com', "MAIL_PORT": 465, "MAIL_USE_TLS": False, "MAIL_USE_SSL": True, "MAIL_USERNAME": '<EMAIL>', "MAIL_PASSWORD": '<PASSWORD>' } app.config.update(mail_settings) mail = Mail(app) @auth.verify_password def verify_password(username, password): if username in users and \ check_password_hash(users.get(username), password): return username @auth.get_user_roles def get_user_roles(user): return roles.get(user) @app.route('/') def index(): return render_template('index.html', title='OTY:n ilmot', page="index") @app.route('/ots2021', methods=['GET', 'POST']) def ots2021(): form = ots2021Form() starttime = datetime(2021, 3, 5, 12, 00, 00) endtime = datetime(2021, 3, 12, 23, 59, 59) nowtime = datetime.now() limit = 4000 maxlimit = 4000 entrys = ots2021Model.query.all() count = ots2021Model.query.count() for entry in entrys: if((entry.etunimi == form.etunimi.data and entry.sukunimi == form.sukunimi.data) or entry.email == form.email.data): flash('Olet jo ilmoittautunut') return render_template('ots2021.html', title='ots2021 ilmoittautuminen', entrys=entrys, count=count, starttime=starttime, endtime=endtime, nowtime=nowtime, limit=limit, form=form, page="ots2021") if request.method == 'POST': validate = form.validate_on_submit() submitted = form.is_submitted() else: validate = False submitted = False if validate and submitted and count <= maxlimit: 
flash('Ilmoittautuminen onnistui') sub = ots2021Model( etunimi = form.etunimi.data, sukunimi = form.sukunimi.data, email = form.email.data, kilta = form.kilta.data, consent1 = form.consent1.data, consent0 = form.consent0.data, datetime = nowtime, ) db.session.add(sub) db.session.commit() return render_template('ots2021_redirect.html') elif submitted and count > maxlimit: flash('IlmoittIlmoittautuminen on jo täynnä') elif (not validate) and submitted: flash('Ilmoittautuminen epäonnistui, tarkista syöttämäsi tiedot') return render_template('ots2021.html', title='ots2021 ilmottautuminen', entrys=entrys, count=count, starttime=starttime, endtime=endtime, nowtime=nowtime, limit=limit, form=form, page="ots2021") @app.route('/ots2021_data', methods=['GET']) @auth.login_required(role=['admin', 'meea']) def ots2021_data(): limit = 4000 entries = ots2021Model.query.all() count = ots2021Model.query.count() return render_template('ots2021_data.html', title='ots2021 data', entries=entries, count=count, limit=limit) @app.route('/ots2021_data/ots2021Model.csv') @auth.login_required(role=['admin', 'meea']) def ots2021_csv(): os.system('mkdir csv') sqlite_to_csv.exportToCSV('ots2021_model') dir = os.path.join(os.getcwd(), 'csv/') try: return send_from_directory(directory=dir, filename='ots2021_model_data.csv', as_attachment=True) except FileNotFoundError as e: abort(404) @app.route('/kysely_arvonta_juttu_data', methods=['GET']) @auth.login_required(role=['admin', 'kysely_arvonta_juttu']) def kysely_arvonta_juttu_data(): limit = 4000 entries = kysely_arvonta_juttuModel.query.all() count = kysely_arvonta_juttuModel.query.count() return render_template('kysely_arvonta_juttu_data.html', title='kysely_arvonta_juttu data', entries=entries, count=count, limit=limit) @app.route('/kysely_arvonta_juttu_data/kysely_arvonta_juttu_model_data.csv') @auth.login_required(role=['admin', 'kysely_arvonta_juttu']) def kysely_arvonta_juttu_csv(): os.system('mkdir csv') sqlite_to_csv.exportToCSV('kysely_arvonta_juttu_model') dir = os.path.join(os.getcwd(), 'csv/') try: print(dir) return send_from_directory(directory=dir, filename='kysely_arvonta_juttu_model_data.csv', as_attachment=True) except FileNotFoundError as e: print(e) abort(404) @app.route('/fuksisitsit', methods=['GET', 'POST']) def fuksisitsit(): form = fuksisitsitForm() starttime = datetime(2021, 4, 9, 15, 00, 00) endtime = datetime(2021, 4, 15, 23, 59, 59) nowtime = datetime.now() limit = 4000 count = 0 entrys = fuksisitsitModel.query.all() totalCount = 0 for entry in entrys: totalCount += 1 for entry in entrys: if(entry.etunimi0 == form.etunimi0.data) and entry.sukunimi0 == form.sukunimi0.data: flash('Olet jo ilmoittautunut') return render_template('/2021/fuksisitsit.html', title='Fuksisitsit', entrys=entrys, starttime=starttime, endtime=endtime, nowtime=nowtime, form=form, totalCount=totalCount, limit=limit, page="fuksisitsit") if request.method == 'POST': validate = form.validate_on_submit() submitted = form.is_submitted() else: validate = False submitted = False if validate and submitted and limit > totalCount: flash('Ilmoittautuminen onnistui') sub = fuksisitsitModel( membercount = form.membercount.data, etunimi0 = form.etunimi0.data, sukunimi0 = form.sukunimi0.data, phone0 = form.phone0.data, email0 = form.email0.data, kilta0 = form.kilta0.data, vapaavalinta0 = form.vapaavalinta0.data, consent0 = form.consent0.data, consent1 = form.consent1.data, consent2 = form.consent2.data, datetime = nowtime ) db.session.add(sub) db.session.commit() return 
redirect(url_for('fuksisitsit')) elif submitted and limit < totalCount: totalCount -= 1 flash('Ilmoittautuminen on jo täynnä') elif (not validate) and submitted: flash('Ilmoittautuminen epäonnistui, tarkista syöttämäsi tiedot') return render_template('/2021/fuksisitsit.html', title='Fuksisitsit', entrys=entrys, starttime=starttime, endtime=endtime, nowtime=nowtime, form=form, totalCount=totalCount, limit=limit, page="fuksisitsit") @app.route('/fuksisitsit_data', methods=['GET']) @auth.login_required(role=['admin', 'ella', 'anssi', 'meea']) def fuksisitsit_data(): limit = 4000 entries = fuksisitsitModel.query.all() count = fuksisitsitModel.query.count() return render_template('/2021/fuksisitsit_data.html', title='fuksisitsit data', entries=entries, count=count, limit=limit) @app.route('/fuksisitsit/fuksisitsit_model_data.csv') @auth.login_required(role=['admin', 'ella']) def fuksisitsit_csv(): os.system('mkdir csv') sqlite_to_csv.exportToCSV('fuksisitsit_model') dir = os.path.join(os.getcwd(), 'csv/') try: print(dir) return send_from_directory(directory=dir, filename='fuksisitsit_model_data.csv', as_attachment=True) except FileNotFoundError as e: print(e) abort(404) @app.route('/tupsufuksisitsit', methods=['GET', 'POST']) def tupsufuksisitsit(): form = tupsufuksisitsitForm() starttime = datetime(2021, 4, 9, 15, 00, 00) endtime = datetime(2021, 4, 14, 23, 59, 59) nowtime = datetime.now() limit = 4000 count = 0 entrys = tupsufuksisitsitModel.query.all() totalCount = 0 for entry in entrys: totalCount += 1 for entry in entrys: if(entry.etunimi0 == form.etunimi0.data) and entry.sukunimi0 == form.sukunimi0.data: flash('Olet jo ilmoittautunut') return render_template('/2021/tupsufuksisitsit.html', title='TupsuFuksisitsit', entrys=entrys, starttime=starttime, endtime=endtime, nowtime=nowtime, form=form, totalCount=totalCount, limit=limit, page="tupsufuksisitsit") if request.method == 'POST': validate = form.validate_on_submit() submitted = form.is_submitted() else: validate = False submitted = False if validate and submitted and limit > totalCount: flash('Ilmoittautuminen onnistui') sub = tupsufuksisitsitModel( membercount = form.membercount.data, etunimi0 = form.etunimi0.data, sukunimi0 = form.sukunimi0.data, phone0 = form.phone0.data, email0 = form.email0.data, kilta0 = form.kilta0.data, vapaavalinta0 = form.vapaavalinta0.data, consent0 = form.consent0.data, consent1 = form.consent1.data, consent2 = form.consent2.data, datetime = nowtime ) db.session.add(sub) db.session.commit() return redirect(url_for('tupsufuksisitsit')) elif submitted and limit < totalCount: totalCount -= 1 flash('Ilmoittautuminen on jo täynnä') elif (not validate) and submitted: flash('Ilmoittautuminen epäonnistui, tarkista syöttämäsi tiedot') return render_template('/2021/tupsufuksisitsit.html', title='TupsuFuksisitsit', entrys=entrys, starttime=starttime, endtime=endtime, nowtime=nowtime, form=form, totalCount=totalCount, limit=limit, page="tupsufuksisitsit") @app.route('/tupsufuksisitsit_data', methods=['GET']) @auth.login_required(role=['admin', 'ella', 'anssi', 'meea']) def tupsufuksisitsit_data(): limit = 4000 entries = tupsufuksisitsitModel.query.all() count = tupsufuksisitsitModel.query.count() return render_template('/2021/tupsufuksisitsit_data.html', title='tupsufuksisitsit data', entries=entries, count=count, limit=limit) @app.route('/tupsufuksisitsit/tupsufuksisitsit_model_data.csv') @auth.login_required(role=['admin', 'ella']) def tupsufuksisitsit_csv(): os.system('mkdir csv') 
sqlite_to_csv.exportToCSV('tupsufuksisitsit_model') dir = os.path.join(os.getcwd(), 'csv/') try: print(dir) return send_from_directory(directory=dir, filename='tupsufuksisitsit_model_data.csv', as_attachment=True) except FileNotFoundError as e: print(e) abort(404) @app.route('/fuksibeerpong', methods=['GET', 'POST']) def fuksibeerpong(): form = fuksibeerpongForm() starttime = datetime(2021, 4, 13, 12, 00, 00) endtime = datetime(2021, 4, 18, 23, 59, 59) nowtime = datetime.now() limit = 32 count = 0 entrys = fuksibeerpongModel.query.all() totalCount = 0 for entry in entrys: totalCount += 1 for entry in entrys: if(entry.joukkue == form.joukkue.data): flash('Olet jo ilmoittautunut') return render_template('/2021/fuksibeerpong.html', title='Fuksibeerpong', entrys=entrys, starttime=starttime, endtime=endtime, nowtime=nowtime, form=form, totalCount=totalCount, limit=limit, page="fuksibeerpong") if request.method == 'POST': validate = form.validate_on_submit() submitted = form.is_submitted() else: validate = False submitted = False if validate and submitted and limit > totalCount: flash('Ilmoittautuminen onnistui') sub = fuksibeerpongModel( joukkue = form.joukkue.data, etunimi0 = form.etunimi0.data, sukunimi0 = form.sukunimi0.data, phone0 = form.phone0.data, email0 = form.email0.data, etunimi1 = form.etunimi1.data, sukunimi1 = form.sukunimi1.data, consent0 = form.consent0.data, consent1 = form.consent1.data, consent2 = form.consent2.data, datetime = nowtime ) db.session.add(sub) db.session.commit() return redirect(url_for('fuksibeerpong')) elif submitted and limit < totalCount: totalCount -= 1 flash('Ilmoittautuminen on jo täynnä') elif (not validate) and submitted: flash('Ilmoittautuminen epäonnistui, tarkista syöttämäsi tiedot') return render_template('/2021/fuksibeerpong.html', title='Fuksibeerpong', entrys=entrys, starttime=starttime, endtime=endtime, nowtime=nowtime, form=form, totalCount=totalCount, limit=limit, page="fuksibeerpong") @app.route('/fuksibeerpong_data', methods=['GET']) @auth.login_required(role=['admin', 'nikita', 'anssi']) def fuksibeerpong_data(): limit = 32 entries = fuksibeerpongModel.query.all() count = fuksibeerpongModel.query.count() return render_template('/2021/fuksibeerpong_data.html', title='fuksibeerpong data', entries=entries, count=count, limit=limit) @app.route('/fuksibeerpong/fuksibeerpong_model_data.csv') @auth.login_required(role=['admin', 'nikita', 'anssi']) def fuksibeerpong_csv(): os.system('mkdir csv') sqlite_to_csv.exportToCSV('fuksibeerpong_model') dir = os.path.join(os.getcwd(), 'csv/') try: print(dir) return send_from_directory(directory=dir, filename='fuksibeerpong_model_data.csv', as_attachment=True) except FileNotFoundError as e: print(e) abort(404) @app.route('/beerpong', methods=['GET', 'POST']) def beerpong(): form = beerpongForm() starttime = datetime(2021, 4, 13, 16, 00, 00) endtime = datetime(2021, 4, 19, 23, 59, 59) nowtime = datetime.now() limit = 32 count = 0 entrys = beerpongModel.query.all() totalCount = 0 for entry in entrys: totalCount += 1 for entry in entrys: if(entry.joukkue == form.joukkue.data): flash('Olet jo ilmoittautunut') return render_template('/2021/beerpong.html', title='Beerpong', entrys=entrys, starttime=starttime, endtime=endtime, nowtime=nowtime, form=form, totalCount=totalCount, limit=limit, page="beerpong") if request.method == 'POST': validate = form.validate_on_submit() submitted = form.is_submitted() else: validate = False submitted = False if validate and submitted and limit > totalCount: flash('Ilmoittautuminen 
onnistui') sub = beerpongModel( joukkue = form.joukkue.data, etunimi0 = form.etunimi0.data, sukunimi0 = form.sukunimi0.data, phone0 = form.phone0.data, email0 = form.email0.data, etunimi1 = form.etunimi1.data, sukunimi1 = form.sukunimi1.data, consent0 = form.consent0.data, consent1 = form.consent1.data, consent2 = form.consent2.data, datetime = nowtime ) db.session.add(sub) db.session.commit() return redirect(url_for('beerpong')) elif submitted and limit < totalCount: totalCount -= 1 flash('Ilmoittautuminen on jo täynnä') elif (not validate) and submitted: flash('Ilmoittautuminen epäonnistui, tarkista syöttämäsi
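# The event routes above all repeat the same flow: build the form, list existing entries,
# reject duplicate names, enforce the capacity limit, then insert a row and flash a status
# message. The helper below is a minimal sketch of that shared pattern, not part of the
# original app: handle_registration, field_map and the duplicate_fields default are
# illustrative names/assumptions, the signup window (starttime/endtime) handling is omitted
# for brevity, and `page` is assumed to match the view endpoint name as in the routes above.
# Unlike the per-route `limit > totalCount` checks, it also treats count == limit as full.
from datetime import datetime
from flask import flash, redirect, render_template, request, url_for
from app import db  # same SQLAlchemy handle already imported at the top of this module

def handle_registration(form_cls, model_cls, template, page, limit, field_map,
                        duplicate_fields=('etunimi0', 'sukunimi0')):
    """Shared signup flow: duplicate check, capacity check, insert, flash."""
    form = form_cls()
    entrys = model_cls.query.all()
    total_count = len(entrys)

    # Same duplicate check as the per-route loops, just made generic.
    for entry in entrys:
        if all(getattr(entry, f) == getattr(form, f).data for f in duplicate_fields):
            flash('Olet jo ilmoittautunut')
            return render_template(template, form=form, entrys=entrys,
                                   totalCount=total_count, limit=limit, page=page)

    submitted = request.method == 'POST' and form.is_submitted()
    validate = request.method == 'POST' and form.validate_on_submit()

    if validate and submitted and total_count < limit:
        flash('Ilmoittautuminen onnistui')
        values = {column: getattr(form, field).data for column, field in field_map.items()}
        db.session.add(model_cls(datetime=datetime.now(), **values))
        db.session.commit()
        return redirect(url_for(page))
    if submitted and total_count >= limit:
        flash('Ilmoittautuminen on jo täynnä')
    elif submitted and not validate:
        flash('Ilmoittautuminen epäonnistui, tarkista syöttämäsi tiedot')
    return render_template(template, form=form, entrys=entrys,
                           totalCount=total_count, limit=limit, page=page)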
model_names col_names = [f"({i + 1})" for i in range(len(model_names))] return col_names, col_groups def _customize_col_groups(default_col_groups, custom_col_groups): """Change default (inferred) column group titles using custom column groups. Args: default_col_groups (list or NoneType): The inferred column groups. custom_col_groups (list or dict): Dictionary mapping defautl column group titles to custom column group titles, if the defautl column groups are defined. Must be a list of the same lenght as models otherwise. Returns: col_groups (list): Column groups to display in estimation table. """ if custom_col_groups: if not default_col_groups: if not isinstance(custom_col_groups, list): raise ValueError( """With unique model names, multiple models can't be grouped under common group name. Provide list of unique group names instead, if you wish to add column level.""" ) col_groups = custom_col_groups else: if isinstance(custom_col_groups, list): col_groups = custom_col_groups elif isinstance(custom_col_groups, dict): col_groups = ( pd.Series(default_col_groups).replace(custom_col_groups).to_list() ) else: raise TypeError( f"""Invalid type for custom_col_groups. Can be either list or dictionary, or NoneType. Not: {type(col_groups)}.""" ) else: col_groups = default_col_groups return col_groups def _customize_col_names(default_col_names, custom_col_names): """Change default (inferred) column names using custom column names. Args: deafult_col_names (list): The default (inferred) column names. custom_col_names (list or dict): Dictionary mapping default column names to custom column names, or list to display as the name of each model column. Returns: column_names (list): The column names to display in the estimatino table. """ if not custom_col_names: col_names = default_col_names elif isinstance(custom_col_names, dict): col_names = list(pd.Series(default_col_names).replace(custom_col_names)) elif isinstance(custom_col_names, list): if not len(custom_col_names) == len(default_col_names): raise ValueError( f"""If provided as a list, custom_col_names should have same length as default_col_names. Lenght of custom_col_names {len(custom_col_names)} !=length of default_col_names {len(default_col_names)}""" ) elif any(isinstance(i, list) for i in custom_col_names): raise ValueError("Custom_col_names cannot be a nested list") col_names = custom_col_names else: raise TypeError( f"""Invalid type for custom_col_names. Can be either list or dictionary, or NoneType. Not: {col_names}.""" ) return col_names def _create_group_to_col_position(column_groups): """Get mapping from column groups to column positions. Args: column_names (list): The column groups to display in the estimatino table. Returns: group_to_col_index(dict): The mapping from column group titles to column positions. """ if column_groups is not None: group_to_col_index = {group: [] for group in list(set(column_groups))} for i, group in enumerate(column_groups): group_to_col_index[group].append(i) else: group_to_col_index = None return group_to_col_index def _convert_frame_to_string_series( df, significance_levels, show_stars, ): """Return processed value series with significance stars and inference information. Args: df (DataFrame): params DataFrame of the model significance_levels (list): see main docstring number_format (int): see main docstring show_inference (bool): see main docstring confidence_intervals (bool): see main docstring show_stars (bool): see main docstring Returns: sr (pd.Series): string series with values and inferences. 
""" value_sr = df["value"] if show_stars: sig_bins = [-1] + sorted(significance_levels) + [2] value_sr += "$^{" value_sr += ( pd.cut( df["p_value"], bins=sig_bins, labels=[ "*" * (len(significance_levels) - i) for i in range(len(significance_levels) + 1) ], ) .astype("str") .replace("nan", "") .replace(np.nan, "") ) value_sr += " }$" if "ci_lower" in df: ci_lower = df["ci_lower"] ci_upper = df["ci_upper"] inference_sr = "(" inference_sr += ci_lower inference_sr += r";" inference_sr += ci_upper inference_sr += ")" sr = _combine_series(value_sr, inference_sr) elif "standard_error" in df: standard_error = df["standard_error"] inference_sr = "(" + standard_error + ")" sr = _combine_series(value_sr, inference_sr) else: sr = value_sr # replace empty braces with empty string sr = sr.where(sr.apply(lambda x: bool(re.search(r"\d", x))), "") sr.name = "" return sr def _combine_series(value_sr, inference_sr): """Merge value and inference series. Return string series with parameter values and precision values below respective param values. Args: values_sr (Series): string series of estimated parameter values inference_sr (Series): string series of inference values Returns: series: combined string series of param and inference values """ value_df = value_sr.to_frame(name="") original_cols = value_df.columns value_df.reset_index(drop=False, inplace=True) index_names = [item for item in value_df.columns if item not in original_cols] # set the index to even numbers, starting at 0 value_df.index = value_df.index * 2 inference_df = inference_sr.to_frame(name="") inference_df.reset_index(drop=False, inplace=True) # set the index to odd numbers, starting at 1 inference_df.index = (inference_df.index * 2) + 1 inference_df[index_names[-1]] = "" df = pd.concat([value_df, inference_df]).sort_index() df.set_index(index_names, inplace=True, drop=True) return df[""] def _create_statistics_sr( model, stats_options, significance_levels, show_stars, number_format, add_trailing_zeros, max_trail, ): """Process statistics values, return string series. Args: model (estimation result): see main docstring stats_options (dict): see main docstring significance_levels (list): see main docstring show_stars (bool): see main docstring number_format (int): see main focstring Returns: series: string series with summary statistics values and additional info if applicable. 
""" stats_values = {} stats_options = deepcopy(stats_options) if "show_dof" in stats_options: show_dof = stats_options.pop("show_dof") else: show_dof = None for k in stats_options: if k not in ["n_obs", "nobs"]: stats_values[stats_options[k]] = model.info.get(k, np.nan) raw_formatted = _apply_number_format( pd.DataFrame(pd.Series(stats_values)), number_format ) if add_trailing_zeros: formatted = _apply_number_format(raw_formatted, max_trail) else: formatted = raw_formatted stats_values = formatted.to_dict()[0] if "n_obs" in stats_options: n_obs = model.info.get("n_obs", np.nan) if not np.isnan(n_obs): n_obs = int(n_obs) stats_values[stats_options["n_obs"]] = n_obs elif "nobs" in stats_options: n_obs = model.info.get("nobs", np.nan) if not np.isnan(n_obs): n_obs = int(n_obs) stats_values[stats_options["nobs"]] = n_obs if "fvalue" in model.info and "F Statistic" in stats_values: if show_stars and "f_pvalue" in model.info: sig_bins = [-1] + sorted(significance_levels) + [2] sig_icon_fstat = "*" * ( len(significance_levels) - np.digitize(model.info["f_pvalue"], sig_bins) + 1 ) stats_values["F Statistic"] = ( stats_values["F Statistic"] + "$^{" + sig_icon_fstat + "}$" ) if show_dof: fstat_str = "{{{}(df={};{})}}" stats_values["F Statistic"] = fstat_str.format( stats_values["F Statistic"], int(model.info["df_model"]), int(model.info["df_resid"]), ) if "resid_std_err" in model.info and "Residual Std. Error" in stats_values: if show_dof: rse_str = "{{{}(df={})}}" stats_values["Residual Std. Error"] = rse_str.format( stats_values["Residual Std. Error"], int(model.info["df_resid"]) ) stat_sr = pd.Series(stats_values) # the follwing is to make sure statistics dataframe has as many levels of # indices as the parameters dataframe. stat_ind = np.empty((len(stat_sr), model.params.index.nlevels - 1), dtype=str) stat_ind = np.concatenate( [stat_sr.index.values.reshape(len(stat_sr), 1), stat_ind], axis=1 ).T stat_sr.index = pd.MultiIndex.from_arrays(stat_ind) return stat_sr.astype("str").replace("nan", "") def _process_frame_indices( df, custom_param_names, custom_index_names, show_col_names, show_col_groups, column_names, column_groups, ): """Process body DataFrame, customize the header. Args: df (DataFrame): string DataFrame with parameter values and inferences. custom_param_names (dict): see main docstring custom_index_names (list): see main docstring show_col_names (bool): see main docstring column_names (list): List of column names to display in estimation table. column_groups (list): List of column group titles to display in estimation table. Returns: processed_df (DataFrame): string DataFrame with customized header. """ # The column names of the df are empty strings. # If show_col_names is True, rename columns using column_names. # Add column level if show col_groups is True. if show_col_names: if show_col_groups: df.columns = pd.MultiIndex.from_tuples( [(i, j) for i, j in zip(column_groups, column_names)] ) else: df.columns = column_names if custom_index_names: if isinstance(custom_index_names, list): df.index.names = custom_index_names elif isinstance(custom_index_names, dict): df.rename_axis(index=custom_index_names, inplace=True) else: TypeError( f"""Invalid custom_index_names can be of type either list or dict, or NoneType. 
Not: {type(custom_index_names)}.""" ) if custom_param_names: ind = df.index.to_frame() ind = ind.replace(custom_param_names) df.index = pd.MultiIndex.from_frame(ind) return df def _generate_notes_latex( append_notes, notes_label, significance_levels, custom_notes, df ): """Generate the LaTex script of the notes section. Args: append_notes (bool): see main docstring notes_label (str): see main docstring significance_levels (list): see main docstring custom_notes (str): see main docstring df (DataFrame): params DataFrame of estimation model Returns: notes_latex (str): a string with LaTex script """ n_levels = df.index.nlevels n_columns = len(df.columns) significance_levels = sorted(significance_levels) notes_text = "" if append_notes: notes_text += "\\midrule\n" notes_text += "\\textit{{{}}} & \\multicolumn{{{}}}{{r}}{{".format( notes_label, str(n_columns + n_levels - 1) ) # iterate over penultimate significance_lelvels since last item of legend # is not followed by a semi column for i in range(len(significance_levels) - 1): star = "*" * (len(significance_levels) - i) notes_text += "$^{{{}}}$p$<${};".format(star, str(significance_levels[i])) notes_text += "$^{*}$p$<$" + str(significance_levels[-1]) + "} \\\\\n" if custom_notes: amp_n = "&" * n_levels if isinstance(custom_notes, list): if not all(isinstance(n, str) for n in custom_notes): raise ValueError( f"""Each custom note can only be of string type. The following notes: {[n for n in custom_notes if not type(n)==str]} are of types {[type(n) for n in custom_notes if not type(n)==str]} respectively.""" ) for n in custom_notes: notes_text += """ {}\\multicolumn{{{}}}{{r}}\\textit{{{}}}\\\\\n""".format( amp_n, n_columns, n ) elif isinstance(custom_notes, str): notes_text += "{}\\multicolumn{{{}}}{{r}}\\textit{{{}}}\\\\\n".format( amp_n, n_columns, custom_notes ) else: raise ValueError( f"""Custom notes can be either a string
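# The value-formatting helper above attaches significance stars by binning p-values with
# pd.cut: the bin edges are [-1] + sorted(significance_levels) + [2] and the labels run
# from the most stars down to none. The standalone sketch below reproduces only that
# star-assignment step with made-up inputs so the binning logic is visible in isolation;
# it is an illustration, not library code.
import pandas as pd

def significance_stars(p_values, significance_levels=(0.1, 0.05, 0.01)):
    """Map p-values to star strings the same way the table formatter does."""
    levels = sorted(significance_levels)          # e.g. [0.01, 0.05, 0.1]
    sig_bins = [-1] + levels + [2]                # [-1, 0.01, 0.05, 0.1, 2]
    labels = ["*" * (len(levels) - i) for i in range(len(levels) + 1)]
    # labels == ['***', '**', '*', ''] -> smaller p-values earn more stars
    stars = pd.cut(pd.Series(p_values, dtype=float), bins=sig_bins, labels=labels)
    return stars.astype("str").replace("nan", "")

print(significance_stars([0.003, 0.03, 0.07, 0.4]).tolist())
# ['***', '**', '*', '']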
<filename>smt/applications/tests/test_mixed_integer.py import unittest import numpy as np import matplotlib matplotlib.use("Agg") from smt.applications.mixed_integer import ( MixedIntegerContext, MixedIntegerSamplingMethod, FLOAT, ENUM, ORD, GOWER, check_xspec_consistency, unfold_xlimits_with_continuous_limits, fold_with_enum_index, unfold_with_enum_mask, compute_unfolded_dimension, cast_to_enum_value, cast_to_mixed_integer, cast_to_discrete_values, ) from smt.problems import Sphere from smt.sampling_methods import LHS from smt.surrogate_models import KRG, QP from smt.applications.mixed_integer import INT class TestMixedInteger(unittest.TestCase): ###INT DEPRECATED#### def test_qp_mixed_2D_INT(self): xtypes = [FLOAT, INT] xlimits = [[-10, 10], [-10, 10]] mixint = MixedIntegerContext(xtypes, xlimits) sm = mixint.build_surrogate_model(QP(print_prediction=False)) sampling = mixint.build_sampling_method(LHS, criterion="m") fun = Sphere(ndim=2) xt = sampling(10) yt = fun(xt) sm.set_training_values(xt, yt) sm.train() eq_check = True for i in range(xt.shape[0]): if abs(float(xt[i, :][1]) - int(float(xt[i, :][1]))) > 10e-8: eq_check = False self.assertTrue(eq_check) def test_krg_mixed_3D_INT(self): xtypes = [FLOAT, (ENUM, 3), INT] xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]] mixint = MixedIntegerContext(xtypes, xlimits) sm = mixint.build_surrogate_model(KRG(print_prediction=False)) sampling = mixint.build_sampling_method(LHS, criterion="m") fun = Sphere(ndim=3) xt = sampling(20) yt = fun(xt) sm.set_training_values(xt, yt) sm.train() eq_check = True for i in range(xt.shape[0]): if abs(float(xt[i, :][2]) - int(float(xt[i, :][2]))) > 10e-8: eq_check = False if not (xt[i, :][1] == 0 or xt[i, :][1] == 1 or xt[i, :][1] == 2): eq_check = False self.assertTrue(eq_check) def test_check_xspec_consistency(self): xtypes = [FLOAT, (ENUM, 3), ORD] xlimits = [[-10, 10], ["blue", "red", "green"]] # Bad dimension with self.assertRaises(ValueError): check_xspec_consistency(xtypes, xlimits) xtypes = [FLOAT, (ENUM, 3), ORD] xlimits = [[-10, 10], ["blue", "red"], [-10, 10]] # Bad enum with self.assertRaises(ValueError): check_xspec_consistency(xtypes, xlimits) xtypes = [FLOAT, (ENUM, 2), (ENUM, 3), ORD] xlimits = np.array( [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "4", "3"]], dtype="object", ) l = unfold_xlimits_with_continuous_limits(xtypes, xlimits) with self.assertRaises(ValueError): check_xspec_consistency(xtypes, xlimits) def test_krg_mixed_3D(self): xtypes = [FLOAT, (ENUM, 3), ORD] xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]] mixint = MixedIntegerContext(xtypes, xlimits) sm = mixint.build_surrogate_model(KRG(print_prediction=False)) sampling = mixint.build_sampling_method(LHS, criterion="m") fun = Sphere(ndim=3) xt = sampling(20) yt = fun(xt) sm.set_training_values(xt, yt) sm.train() eq_check = True for i in range(xt.shape[0]): if abs(float(xt[i, :][2]) - int(float(xt[i, :][2]))) > 10e-8: eq_check = False if not (xt[i, :][1] == 0 or xt[i, :][1] == 1 or xt[i, :][1] == 2): eq_check = False self.assertTrue(eq_check) def test_krg_mixed_3D_bad_regr(self): xtypes = [FLOAT, (ENUM, 3), ORD] xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]] mixint = MixedIntegerContext(xtypes, xlimits) with self.assertRaises(ValueError): sm = mixint.build_surrogate_model( KRG(print_prediction=False, poly="linear") ) def test_qp_mixed_2D(self): xtypes = [FLOAT, ORD] xlimits = [[-10, 10], [-10, 10]] mixint = MixedIntegerContext(xtypes, xlimits) sm = 
mixint.build_surrogate_model(QP(print_prediction=False)) sampling = mixint.build_sampling_method(LHS, criterion="m") fun = Sphere(ndim=2) xt = sampling(10) yt = fun(xt) sm.set_training_values(xt, yt) sm.train() eq_check = True for i in range(xt.shape[0]): if abs(float(xt[i, :][1]) - int(float(xt[i, :][1]))) > 10e-8: eq_check = False self.assertTrue(eq_check) def test_compute_unfolded_dimension(self): xtypes = [FLOAT, (ENUM, 2)] self.assertEqual(3, compute_unfolded_dimension(xtypes)) def test_unfold_with_enum_mask(self): xtypes = [FLOAT, (ENUM, 2)] x = np.array([[1.5, 1], [1.5, 0], [1.5, 1]]) expected = [[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]] self.assertListEqual(expected, unfold_with_enum_mask(xtypes, x).tolist()) def test_unfold_with_enum_mask_with_enum_first(self): xtypes = [(ENUM, 2), FLOAT] x = np.array([[1, 1.5], [0, 1.5], [1, 1.5]]) expected = [[0, 1, 1.5], [1, 0, 1.5], [0, 1, 1.5]] self.assertListEqual(expected, unfold_with_enum_mask(xtypes, x).tolist()) def test_fold_with_enum_index(self): xtypes = [FLOAT, (ENUM, 2)] x = np.array([[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]]) expected = [[1.5, 1], [1.5, 0], [1.5, 1]] self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist()) def test_fold_with_enum_index_with_list(self): xtypes = [FLOAT, (ENUM, 2)] expected = [[1.5, 1]] x = np.array([1.5, 0, 1]) self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist()) x = [1.5, 0, 1] self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist()) def test_cast_to_enum_value(self): xlimits = [[0.0, 4.0], ["blue", "red"]] x_col = 1 enum_indexes = [1, 1, 0, 1, 0] expected = ["red", "red", "blue", "red", "blue"] self.assertListEqual(expected, cast_to_enum_value(xlimits, x_col, enum_indexes)) def test_unfolded_xlimits_type(self): xtypes = [FLOAT, (ENUM, 2), (ENUM, 2), ORD] xlimits = np.array([[-5, 5], ["2", "3"], ["4", "5"], [0, 2]]) sampling = MixedIntegerSamplingMethod(xtypes, xlimits, LHS, criterion="ese") doe = sampling(10) self.assertEqual((10, 4), doe.shape) def test_cast_to_mixed_integer(self): xtypes = [FLOAT, (ENUM, 2), (ENUM, 3), ORD] xlimits = np.array( [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 2]], dtype="object", ) x = np.array([1.5, 0, 2, 1.1]) self.assertEqual( [1.5, "blue", "long", 1], cast_to_mixed_integer(xtypes, xlimits, x) ) def test_unfold_xlimits_with_continuous_limits(self): xtypes = [FLOAT, (ENUM, 2), (ENUM, 3), ORD] xlimits = np.array( [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 2]], dtype="object", ) l = unfold_xlimits_with_continuous_limits(xtypes, xlimits) self.assertEqual( np.array_equal( [[-5, 5], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 2]], unfold_xlimits_with_continuous_limits(xtypes, xlimits), ), True, ) def test_unfold_xlimits_with_continuous_limits_and_ordinal_values(self): xtypes = [FLOAT, (ENUM, 2), (ENUM, 3), ORD] xlimits = np.array( [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "3", "4"]], dtype="object", ) l = unfold_xlimits_with_continuous_limits(xtypes, xlimits) self.assertEqual( np.array_equal( [[-5, 5], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 4]], unfold_xlimits_with_continuous_limits(xtypes, xlimits), ), True, ) def test_cast_to_discrete_values(self): xtypes = [FLOAT, (ENUM, 2), (ENUM, 3), ORD] xlimits = np.array( [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 4]], dtype="object", ) x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) self.assertEqual( np.array_equal( np.array([[2.6, 0, 1, 0, 0, 1, 3]]), cast_to_discrete_values(xtypes, xlimits, None, x), 
), True, ) def test_cast_to_discrete_values_with_smooth_rounding_ordinal_values(self): xtypes = [FLOAT, (ENUM, 2), (ENUM, 3), ORD] x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) xlimits = np.array( [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "2", "4"]], dtype="object", ) self.assertEqual( np.array_equal( np.array([[2.6, 0, 1, 0, 0, 1, 4]]), cast_to_discrete_values(xtypes, xlimits, None, x), ), True, ) def test_cast_to_discrete_values_with_hard_rounding_ordinal_values(self): xtypes = [FLOAT, (ENUM, 2), (ENUM, 3), ORD] x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) xlimits = np.array( [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "4"]], dtype="object", ) self.assertEqual( np.array_equal( np.array([[2.6, 0, 1, 0, 0, 1, 4]]), cast_to_discrete_values(xtypes, xlimits, None, x), ), True, ) def test_cast_to_discrete_values_with_non_integer_ordinal_values(self): xtypes = [FLOAT, (ENUM, 2), (ENUM, 3), ORD] x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) xlimits = np.array( [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "3.5"]], dtype="object", ) self.assertEqual( np.array_equal( np.array([[2.6, 0, 1, 0, 0, 1, 3.5]]), cast_to_discrete_values(xtypes, xlimits, None, x), ), True, ) def run_mixed_integer_lhs_example(self): import numpy as np import matplotlib.pyplot as plt from matplotlib import colors from smt.sampling_methods import LHS from smt.applications.mixed_integer import ( FLOAT, ORD, ENUM, MixedIntegerSamplingMethod, ) xtypes = [FLOAT, (ENUM, 2)] xlimits = [[0.0, 4.0], ["blue", "red"]] sampling = MixedIntegerSamplingMethod(xtypes, xlimits, LHS, criterion="ese") num = 40 x = sampling(num) cmap = colors.ListedColormap(xlimits[1]) plt.scatter(x[:, 0], np.zeros(num), c=x[:, 1], cmap=cmap) plt.show() def run_mixed_integer_qp_example(self): import numpy as np import matplotlib.pyplot as plt from smt.surrogate_models import QP from smt.applications.mixed_integer import MixedIntegerSurrogateModel, ORD xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0]) # xtypes = [FLOAT, ORD, (ENUM, 3), (ENUM, 2)] # FLOAT means x1 continuous # ORD means x2 ordered # (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable # (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable sm = MixedIntegerSurrogateModel(xtypes=[ORD], xlimits=[[0, 4]], surrogate=QP()) sm.set_training_values(xt, yt) sm.train() num = 100 x = np.linspace(0.0, 4.0, num) y = sm.predict_values(x) plt.plot(xt, yt, "o") plt.plot(x, y) plt.xlabel("x") plt.ylabel("y") plt.legend(["Training data", "Prediction"]) plt.show() def run_mixed_integer_context_example(self): import numpy as np import matplotlib.pyplot as plt from matplotlib import colors from mpl_toolkits.mplot3d import Axes3D from smt.surrogate_models import KRG from smt.sampling_methods import LHS, Random from smt.applications.mixed_integer import MixedIntegerContext, FLOAT, ORD, ENUM xtypes = [ORD, FLOAT, (ENUM, 4)] xlimits = [[0, 5], [0.0, 4.0], ["blue", "red", "green", "yellow"]] def ftest(x): return (x[:, 0] * x[:, 0] + x[:, 1] * x[:, 1]) * (x[:, 2] + 1) # context to create consistent DOEs and surrogate mixint = MixedIntegerContext(xtypes, xlimits) # DOE for training lhs = mixint.build_sampling_method(LHS, criterion="ese") num = mixint.get_unfolded_dimension() * 5 print("DOE point nb = {}".format(num)) xt = lhs(num) yt = ftest(xt) # Surrogate sm = mixint.build_surrogate_model(KRG()) sm.set_training_values(xt, yt) sm.train() # DOE for validation rand = 
mixint.build_sampling_method(Random) xv = rand(50) yv = ftest(xv) yp = sm.predict_values(xv) plt.plot(yv, yv) plt.plot(yv, yp, "o") plt.xlabel("actual") plt.ylabel("prediction") plt.show() def test_mixed_gower_2D(self): from smt.applications.mixed_integer import ( MixedIntegerSurrogateModel, ENUM, FLOAT, GOWER, ) from smt.surrogate_models import KRG import matplotlib.pyplot as plt import numpy as np import itertools xt = np.array([[0, 5], [2, -1], [4, 0.5]]) yt = np.array([[0.0], [1.0], [1.5]]) xlimits = [["0.0", "1.0", " 2.0",
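# The fold/unfold tests above pin down the mixed-integer encoding convention: an (ENUM, n)
# variable occupies one column holding the level index in the folded representation and n
# one-hot columns in the unfolded one. A minimal round trip using the same helpers imported
# at the top of this module, with values mirroring the expectations asserted in the tests:
import numpy as np
from smt.applications.mixed_integer import (
    FLOAT,
    ENUM,
    unfold_with_enum_mask,
    fold_with_enum_index,
)

xtypes = [FLOAT, (ENUM, 2)]                  # one float, one 2-level categorical

x_folded = np.array([[1.5, 1], [1.5, 0]])    # enum column stores the level index
x_unfolded = unfold_with_enum_mask(xtypes, x_folded)
assert x_unfolded.tolist() == [[1.5, 0, 1], [1.5, 1, 0]]   # one-hot mask columns

x_back = fold_with_enum_index(xtypes, x_unfolded)
assert x_back.tolist() == [[1.5, 1], [1.5, 0]]             # level indices restored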
: double or iter-of-doubles, optional Specifies an offset for each channel in the input data. The final input data is set after applying scaling and subtracting the specified offsets. Default: (103.939, 116.779, 123.68) pre_trained_weights : bool, optional Specifies whether to use the pre-trained weights trained on the ImageNet data set. Default: False pre_trained_weights_file : string, optional Specifies the file name for the pre-trained weights. Must be a fully qualified file name of SAS-compatible file (e.g., *.caffemodel.h5) Note: Required when pre_trained_weights=True. include_top : bool, optional Specifies whether to include pre-trained weights of the top layers (i.e., the FC layers). Default: False Returns ------- :class:`Sequential` If `pre_trained_weights` is False :class:`Model` If `pre_trained_weights` is True References ---------- https://arxiv.org/pdf/1409.1556.pdf ''' conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn') if not pre_trained_weights: model = Sequential(conn=conn, model_table=model_table) model.add(InputLayer(n_channels=n_channels, width=width, height=height, scale=scale, offsets=offsets, random_flip=random_flip, random_crop=random_crop)) model.add(Conv2d(n_filters=64, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=64, width=3, height=3, stride=1)) model.add(Pooling(width=2, height=2, stride=2, pool='max')) model.add(Conv2d(n_filters=128, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=128, width=3, height=3, stride=1)) model.add(Pooling(width=2, height=2, stride=2, pool='max')) model.add(Conv2d(n_filters=256, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=256, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=256, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=256, width=3, height=3, stride=1)) model.add(Pooling(width=2, height=2, stride=2, pool='max')) model.add(Conv2d(n_filters=512, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=512, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=512, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=512, width=3, height=3, stride=1)) model.add(Pooling(width=2, height=2, stride=2, pool='max')) model.add(Conv2d(n_filters=512, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=512, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=512, width=3, height=3, stride=1)) model.add(Conv2d(n_filters=512, width=3, height=3, stride=1)) model.add(Pooling(width=2, height=2, stride=2, pool='max')) model.add(Dense(n=4096, dropout=0.5)) model.add(Dense(n=4096, dropout=0.5)) model.add(OutputLayer(n=n_classes)) return model else: if pre_trained_weights_file is None: raise DLPyError('\nThe pre-trained weights file is not specified.\n' 'Please follow the steps below to attach the pre-trained weights:\n' '1. go to the website https://support.sas.com/documentation/prod-p/vdmml/zip/ ' 'and download the associated weight file.\n' '2. upload the *.h5 file to ' 'a server side directory which the CAS session has access to.\n' '3. 
specify the pre_trained_weights_file using the fully qualified server side path.') model_cas = model_vgg19.VGG19_Model(s=conn, model_table=model_table, n_channels=n_channels, width=width, height=height, random_crop=random_crop, offsets=offsets) if include_top: if n_classes != 1000: warnings.warn('If include_top = True, n_classes will be set to 1000.', RuntimeWarning) model = Model.from_table(model_cas) model.load_weights(path=pre_trained_weights_file, labels=True) return model else: model = Model.from_table(model_cas, display_note=False) model.load_weights(path=pre_trained_weights_file) weight_table_options = model.model_weights.to_table_params() weight_table_options.update(dict(where='_LayerID_<22')) model._retrieve_('table.partition', table=weight_table_options, casout=dict(replace=True, **model.model_weights.to_table_params())) model._retrieve_('deeplearn.removelayer', model=model_table, name='fc8') model._retrieve_('deeplearn.addlayer', model=model_table, name='fc8', layer=dict(type='output', n=n_classes, act='softmax'), srcLayers=['fc7']) model = Model.from_table(conn.CASTable(model_table)) return model def ResNet18_SAS(conn, model_table='RESNET18_SAS', batch_norm_first=True, n_classes=1000, n_channels=3, width=224, height=224, scale=1, random_flip='none', random_crop='none', offsets=(103.939, 116.779, 123.68)): ''' Generates a deep learning model with the ResNet18 architecture. Compared to Caffe ResNet18, the model prepends a batch normalization layer to the last global pooling layer. Parameters ---------- conn : CAS Specifies the CAS connection object. model_table : string, optional Specifies the name of CAS table to store the model. batch_norm_first : bool, optional Specifies whether to have batch normalization layer before the convolution layer in the residual block. For a detailed discussion about this, please refer to this paper: <NAME>, et al. "Identity mappings in deep residual networks." European Conference on Computer Vision. Springer International Publishing, 2016. Default: True n_classes : int, optional Specifies the number of classes. If None is assigned, the model will automatically detect the number of classes based on the training set. Default: 1000 n_channels : int, optional Specifies the number of the channels (i.e., depth) of the input layer. Default: 3 width : int, optional Specifies the width of the input layer. Default: 224 height : int, optional Specifies the height of the input layer. Default: 224 scale : double, optional Specifies a scaling factor to be applied to each pixel intensity values. Default: 1 random_flip : string, optional Specifies how to flip the data in the input layer when image data is used. Approximately half of the input data is subject to flipping. Valid Values: 'h', 'hv', 'v', 'none' Default: 'none' random_crop : string, optional Specifies how to crop the data in the input layer when image data is used. Images are cropped to the values that are specified in the width and height parameters. Only the images with one or both dimensions that are larger than those sizes are cropped. Valid Values: 'none', 'unique' Default: 'none' offsets : double or iter-of-doubles, optional Specifies an offset for each channel in the input data. The final input data is set after applying scaling and subtracting the specified offsets. 
Default: (103.939, 116.779, 123.68) Returns ------- :class:`Sequential` References ---------- https://arxiv.org/pdf/1512.03385.pdf ''' conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn') model = Sequential(conn=conn, model_table=model_table) model.add(InputLayer(n_channels=n_channels, width=width, height=height, scale=scale, offsets=offsets, random_flip=random_flip, random_crop=random_crop)) # Top layers model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2)) model.add(BN(act='relu')) model.add(Pooling(width=3, stride=2)) kernel_sizes_list = [(3, 3), (3, 3), (3, 3), (3, 3)] n_filters_list = [(64, 64), (128, 128), (256, 256), (512, 512)] rep_nums_list = [2, 2, 2, 2] for i in range(4): kernel_sizes = kernel_sizes_list[i] n_filters = n_filters_list[i] for rep_num in range(rep_nums_list[i]): if i == 0: strides = 1 else: if rep_num == 0: strides = 2 else: strides = 1 model.add(ResBlockBN(kernel_sizes=kernel_sizes, n_filters=n_filters, strides=strides, batch_norm_first=batch_norm_first)) # Bottom Layers pooling_size = (width // 2 // 2 // 2 // 2 // 2, height // 2 // 2 // 2 // 2 // 2) model.add(Pooling(width=pooling_size[0], height=pooling_size[1], pool='mean')) model.add(OutputLayer(act='softmax', n=n_classes)) return model def ResNet18_Caffe(conn, model_table='RESNET18_CAFFE', batch_norm_first=False, n_classes=1000, n_channels=3, width=224, height=224, scale=1, random_flip='none', random_crop='none', offsets=None): ''' Generates a deep learning model with the ResNet18 architecture with convolution shortcut. Parameters ---------- conn : CAS Specifies the CAS connection object. model_table : string or dict or CAS table, optional Specifies the CAS table to store the deep learning model. batch_norm_first : bool, optional Specifies whether to have batch normalization layer before the convolution layer in the residual block. For a detailed discussion about this, please refer to this paper: He, Kaiming, et al. "Identity mappings in deep residual networks." European Conference on Computer Vision. Springer International Publishing, 2016. Default: False n_classes : int, optional Specifies the number of classes. If None is assigned, the model will automatically detect the number of classes based on the training set. Default: 1000 n_channels : int, optional Specifies the number of the channels (i.e., depth) of the input layer. Default: 3 width : int, optional Specifies the width of the input layer. Default: 224 height : int, optional Specifies the height of the input layer. Default: 224 scale : double, optional Specifies a scaling factor to be applied to each pixel intensity values. Default: 1 random_flip : string, optional Specifies how to flip the data in the input layer when image data is used. Approximately half of the input data is subject to flipping. Valid Values: 'h', 'hv', 'v', 'none' Default: 'none' random_crop : string, optional Specifies how to crop the data in the input layer when image data is used. Images are cropped to the values that are specified in the width and height parameters. Only the images with one or both dimensions that are larger than those sizes are cropped. Valid Values: 'none', 'unique' Default: 'none' offsets : double or iter-of-doubles, optional Specifies an offset for each channel in the input data. The final input data is set after applying scaling and subtracting the specified offsets. 
Returns ------- :class:`Sequential` References ---------- https://arxiv.org/pdf/1512.03385.pdf ''' conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn') model = Sequential(conn=conn, model_table=model_table) model.add(InputLayer(n_channels=n_channels, width=width, height=height, scale=scale, offsets=offsets, random_flip=random_flip, random_crop=random_crop)) # Top layers model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2)) model.add(BN(act='relu')) model.add(Pooling(width=3, stride=2)) kernel_sizes_list = [(3, 3), (3, 3), (3, 3), (3, 3)] n_filters_list = [(64, 64), (128, 128), (256, 256), (512, 512)] rep_nums_list = [2, 2, 2, 2] for i in range(4): kernel_sizes = kernel_sizes_list[i] n_filters = n_filters_list[i] for rep_num in range(rep_nums_list[i]): if rep_num == 0: conv_short_cut = True if i == 0: strides = 1 else: strides = 2 else: conv_short_cut = False strides = 1 model.add(ResBlock_Caffe(kernel_sizes=kernel_sizes, n_filters=n_filters, strides=strides, batch_norm_first=batch_norm_first, conv_short_cut=conv_short_cut)) # Bottom Layers pooling_size = (width // 2 // 2 // 2 // 2 // 2, height // 2 // 2 // 2 // 2 // 2) model.add(Pooling(width=pooling_size[0], height=pooling_size[1], pool='mean')) model.add(OutputLayer(act='softmax', n=n_classes)) return model def ResNet34_SAS(conn, model_table='RESNET34_SAS', n_classes=1000, n_channels=3, width=224, height=224, scale=1, batch_norm_first=True, random_flip='none', random_crop='none', offsets=(103.939, 116.779, 123.68)): '''
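# Both ResNet18 builders above derive each residual block's stride and shortcut type from
# its position in the (stage, repetition) loops: the first block of every stage after the
# first downsamples with stride 2, and in the Caffe variant only the first block of a stage
# gets a convolutional (1x1) projection shortcut. The helper below just prints that schedule
# so the loop logic can be checked; it builds no layers and is not part of DLPy.
def resnet18_block_plan():
    n_filters_list = [(64, 64), (128, 128), (256, 256), (512, 512)]
    rep_nums_list = [2, 2, 2, 2]
    plan = []
    for i, (n_filters, reps) in enumerate(zip(n_filters_list, rep_nums_list)):
        for rep_num in range(reps):
            strides = 2 if (i > 0 and rep_num == 0) else 1   # downsample at stage entry
            conv_short_cut = rep_num == 0                    # Caffe-style projection shortcut
            plan.append((i, rep_num, n_filters, strides, conv_short_cut))
    return plan

for stage, rep, filters, stride, shortcut in resnet18_block_plan():
    print(stage, rep, filters, stride, shortcut)
# 0 0 (64, 64) 1 True, 0 1 (64, 64) 1 False, 1 0 (128, 128) 2 True, ...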
unknown application with pytest.raises(RPCError) as exc: rpc.stop_application('appli') assert exc.value.args == (Faults.BAD_NAME, 'appli') assert mocked_check.call_args_list == [call()] assert mocked_stop.call_count == 0 assert mocked_progress.call_count == 0 mocked_check.reset_mock() # test RPC call with stopped application application = rpc.supvisors.context.applications['appli_1'] with pytest.raises(RPCError) as exc: rpc.stop_application('appli_1') assert exc.value.args == (Faults.NOT_RUNNING, 'appli_1') assert mocked_check.call_args_list == [call()] assert mocked_stop.call_count == 0 assert mocked_progress.call_count == 0 mocked_check.reset_mock() # test RPC call with running application appli_1.has_running_processes.return_value = True # test no wait and done mocked_stop.return_value = True result = rpc.stop_application('appli_1', False) assert not result assert mocked_check.call_args_list == [call()] assert mocked_stop.call_args_list == [call(application)] assert mocked_progress.call_count == 0 mocked_check.reset_mock() mocked_stop.reset_mock() # test wait and done mocked_stop.return_value = True result = rpc.stop_application('appli_1') assert not result assert mocked_check.call_args_list == [call()] assert mocked_stop.call_args_list == [call(application)] assert mocked_progress.call_count == 0 mocked_check.reset_mock() mocked_stop.reset_mock() # test wait and not done mocked_stop.return_value = False result = rpc.stop_application('appli_1') # result is a function assert callable(result) assert mocked_check.call_args_list == [call()] assert mocked_stop.call_args_list == [call(application)] assert mocked_progress.call_count == 0 # test returned function: return True when job in progress mocked_progress.return_value = True assert result() == NOT_DONE_YET assert mocked_progress.call_args_list == [call()] mocked_progress.reset_mock() # test returned function: raise exception if job not in progress anymore and application not running mocked_progress.return_value = False for _ in [ApplicationStates.STOPPING, ApplicationStates.RUNNING, ApplicationStates.STARTING]: with pytest.raises(RPCError) as exc: result() assert exc.value.args == (Faults.ABNORMAL_TERMINATION, 'appli_1') assert mocked_progress.call_args_list == [call()] mocked_progress.reset_mock() # test returned function: return True if job not in progress anymore and application running application.state = ApplicationStates.STOPPED assert result() assert mocked_progress.call_args_list == [call()] # reset patches for next loop mocked_check.reset_mock() mocked_stop.reset_mock() mocked_progress.reset_mock() def test_restart_application(mocker, rpc): """ Test the restart_application RPC. 
""" mocked_check = mocker.patch('supvisors.rpcinterface.RPCInterface._check_operating') mocked_start = mocker.patch('supvisors.rpcinterface.RPCInterface.start_application') mocked_stop = mocker.patch('supvisors.rpcinterface.RPCInterface.stop_application') # test RPC call with sub-RPC calls return a direct result mocked_stop.return_value = True mocked_start.return_value = False deferred = rpc.restart_application(0, 'appli', 'wait') assert mocked_check.call_args_list == [call()] assert mocked_stop.call_args_list == [call('appli', True)] assert mocked_start.call_count == 0 mocked_stop.reset_mock() mocked_check.reset_mock() # result is a function assert callable(deferred) assert deferred.waitstop # test this function assert not deferred() assert not deferred.waitstop assert mocked_stop.call_count == 0 assert mocked_start.call_args_list == [call(0, 'appli', 'wait')] mocked_start.reset_mock() # test RPC call with sub_RPC calls returning jobs # test with mocking functions telling that the jobs are not completed mocked_stop_job = Mock(return_value=False) mocked_start_job = Mock(return_value=False) mocked_stop.return_value = mocked_stop_job mocked_start.return_value = mocked_start_job deferred = rpc.restart_application(0, 'appli', 'wait') assert mocked_check.call_args_list == [call()] assert mocked_stop.call_args_list == [call('appli', True)] assert mocked_start.call_count == 0 mocked_stop.reset_mock() # result is a function for deferred result assert callable(deferred) assert deferred.waitstop # first call to this function tells that job is still in progress assert mocked_stop_job.call_count == 0 assert mocked_start_job.call_count == 0 assert deferred() == NOT_DONE_YET assert mocked_stop.call_count == 0 assert mocked_start.call_count == 0 assert mocked_stop_job.call_args_list == [call()] assert mocked_start_job.call_count == 0 mocked_stop_job.reset_mock() # replace the stop job with a function telling that the job is completed mocked_stop_job.return_value = True assert deferred() == NOT_DONE_YET assert not deferred.waitstop assert mocked_stop.call_count == 0 assert mocked_start.call_args_list == [call(0, 'appli', 'wait')] assert mocked_stop_job.call_args_list == [call()] assert mocked_start_job.call_count == 0 mocked_stop_job.reset_mock() # call the deferred function again to check that the start is engaged assert not deferred() assert mocked_start_job.call_args_list == [call()] assert mocked_stop_job.call_count == 0 def test_start_args(mocker, rpc): """ Test the start_args RPC. 
""" mocker.patch('supvisors.rpcinterface.RPCInterface._get_application_process', return_value=(None, Mock(namespec='appli:proc'))) # prepare context info_source = rpc.supvisors.info_source info_source.update_extra_args.side_effect = KeyError mocked_startProcess = info_source.supervisor_rpc_interface.startProcess mocked_startProcess.side_effect = [RPCError(Faults.NO_FILE, 'no file'), RPCError(Faults.NOT_EXECUTABLE), RPCError(Faults.ABNORMAL_TERMINATION), 'done'] # test RPC call with extra arguments but with a process that is unknown to Supervisor with pytest.raises(RPCError) as exc: rpc.start_args('appli:proc', 'dummy arguments') assert exc.value.args == (Faults.BAD_NAME, 'namespec appli:proc unknown to this Supervisor instance') assert info_source.update_extra_args.call_args_list == [call('appli:proc', 'dummy arguments')] assert mocked_startProcess.call_count == 0 # update mocking info_source.update_extra_args.reset_mock() info_source.update_extra_args.side_effect = None # test RPC call with start exceptions # NO_FILE exception triggers an update of the process state with pytest.raises(RPCError) as exc: rpc.start_args('appli:proc') assert exc.value.args == (Faults.NO_FILE, 'no file') assert info_source.update_extra_args.call_args_list == [call('appli:proc', '')] assert mocked_startProcess.call_args_list == [call('appli:proc', True)] assert info_source.force_process_fatal.call_args_list == [call('appli:proc', 'NO_FILE: no file')] # reset patches info_source.update_extra_args.reset_mock() info_source.force_process_fatal.reset_mock() mocked_startProcess.reset_mock() # NOT_EXECUTABLE exception triggers an update of the process state with pytest.raises(RPCError) as exc: rpc.start_args('appli:proc', wait=False) assert exc.value.args == (Faults.NOT_EXECUTABLE, ) assert info_source.update_extra_args.call_args_list == [call('appli:proc', '')] assert mocked_startProcess.call_args_list == [call('appli:proc', False)] assert info_source.force_process_fatal.call_args_list == [call('appli:proc', 'NOT_EXECUTABLE')] # reset patches info_source.update_extra_args.reset_mock() info_source.force_process_fatal.reset_mock() mocked_startProcess.reset_mock() # other exception doesn't trigger an update of the process state with pytest.raises(RPCError) as exc: rpc.start_args('appli:proc', wait=False) assert exc.value.args == (Faults.ABNORMAL_TERMINATION, ) assert info_source.update_extra_args.call_args_list == [call('appli:proc', '')] assert mocked_startProcess.call_args_list == [call('appli:proc', False)] assert not info_source.force_process_fatal.called # reset patches info_source.update_extra_args.reset_mock() mocked_startProcess.reset_mock() # finally, normal behaviour assert rpc.start_args('appli:proc') == 'done' assert info_source.update_extra_args.call_args_list == [call('appli:proc', '')] assert mocked_startProcess.call_args_list == [call('appli:proc', True)] assert not info_source.force_process_fatal.called def test_start_process(mocker, rpc): """ Test the start_process RPC. 
""" mocked_check = mocker.patch('supvisors.rpcinterface.RPCInterface._check_operating') # get patches mocked_start = rpc.supvisors.starter.start_process mocked_progress = rpc.supvisors.starter.in_progress # patch the instance rpc._get_application_process = Mock() # test RPC call with unknown strategy with pytest.raises(RPCError) as exc: rpc.start_process('strategy', 'appli:proc') assert exc.value.args == (Faults.INCORRECT_PARAMETERS, 'incorrect strategy: strategy') assert mocked_check.call_args_list == [call()] assert mocked_start.call_count == 0 assert mocked_progress.call_count == 0 mocked_check.reset_mock() # test RPC call with running process rpc._get_application_process.return_value = (None, Mock(namespec='proc1', **{'running.return_value': True})) with pytest.raises(RPCError) as exc: rpc.start_process(0, 'appli_1') assert exc.value.args == (Faults.ALREADY_STARTED, 'proc1') assert mocked_check.call_args_list == [call()] assert mocked_start.call_count == 0 assert mocked_progress.call_count == 0 mocked_check.reset_mock() # test RPC call with running processes proc_1 = Mock(**{'running.return_value': False}) proc_2 = Mock(namespec='proc2', **{'running.return_value': True}) rpc._get_application_process.return_value = (Mock(**{'processes.values.return_value': [proc_1, proc_2]}), None) with pytest.raises(RPCError) as exc: rpc.start_process(0, 'appli_1') assert exc.value.args == (Faults.ALREADY_STARTED, 'proc2') assert mocked_check.call_args_list == [call()] assert mocked_start.call_count == 0 assert mocked_progress.call_count == 0 mocked_check.reset_mock() # test RPC call with stopped processes proc_1 = Mock(namespec='proc1', **{'running.return_value': False, 'stopped.return_value': True}) proc_2 = Mock(namespec='proc2', **{'running.return_value': False, 'stopped.return_value': False}) rpc._get_application_process.return_value = (Mock(**{'processes.values.return_value': [proc_1, proc_2]}), None) # test RPC call with no wait and not done mocked_start.return_value = False result = rpc.start_process(1, 'appli:*', 'argument list', False) assert result assert mocked_check.call_args_list == [call()] assert mocked_start.call_args_list == [call(StartingStrategies.LESS_LOADED, proc_1, 'argument list'), call(StartingStrategies.LESS_LOADED, proc_2, 'argument list')] assert not mocked_progress.called mocked_check.reset_mock() mocked_start.reset_mock() # test RPC call no wait and done mocked_start.return_value = True with pytest.raises(RPCError) as exc: rpc.start_process(1, 'appli:*', 'argument list', False) assert exc.value.args == (Faults.ABNORMAL_TERMINATION, 'appli:*') assert mocked_check.call_args_list == [call()] assert mocked_start.call_args_list == [call(StartingStrategies.LESS_LOADED, proc_1, 'argument list'), call(StartingStrategies.LESS_LOADED, proc_2, 'argument list')] assert not mocked_progress.called mocked_check.reset_mock() mocked_start.reset_mock() # test RPC call with wait and done with pytest.raises(RPCError) as exc: rpc.start_process(2, 'appli:*', wait=True) assert exc.value.args == (Faults.ABNORMAL_TERMINATION, 'appli:*') assert mocked_start.call_args_list == [call(StartingStrategies.MOST_LOADED, proc_1, ''), call(StartingStrategies.MOST_LOADED, proc_2, '')] assert not mocked_progress.called mocked_check.reset_mock() mocked_start.reset_mock() # test RPC call with wait and not done mocked_start.return_value = False deferred = rpc.start_process(2, 'appli:*', wait=True) # result is a function for deferred result assert callable(deferred) assert mocked_check.call_args_list == [call()] 
assert mocked_start.call_args_list == [call(StartingStrategies.MOST_LOADED, proc_1, ''), call(StartingStrategies.MOST_LOADED, proc_2, '')] assert not mocked_progress.called # test returned function: return True when job in progress mocked_progress.return_value = True assert deferred() == NOT_DONE_YET assert mocked_progress.call_args_list == [call()] mocked_progress.reset_mock() # test returned function: raise exception if job not in progress anymore and process still stopped mocked_progress.return_value = False with pytest.raises(RPCError) as exc: deferred() assert exc.value.args == (Faults.ABNORMAL_TERMINATION, 'proc1') assert mocked_progress.call_args_list == [call()] mocked_progress.reset_mock() # test returned function: return True if job not in progress anymore and process running proc_1.stopped.return_value = False assert deferred() assert mocked_progress.call_args_list == [call()] def test_stop_process(mocker, rpc): """ Test the stop_process RPC. """ mocked_check = mocker.patch('supvisors.rpcinterface.RPCInterface._check_operating_conciliation') # get patches mocked_stop = rpc.supvisors.stopper.stop_process mocked_progress = rpc.supvisors.stopper.in_progress # patch the instance rpc._get_application_process = Mock() # test RPC call with running process rpc._get_application_process.return_value = (None, Mock(namespec='proc1', **{'stopped.return_value': True})) with pytest.raises(RPCError) as exc: rpc.stop_process('appli_1') assert exc.value.args == (Faults.NOT_RUNNING, 'proc1') assert mocked_check.call_args_list == [call()] assert mocked_stop.call_count == 0 assert mocked_progress.call_count == 0 mocked_check.reset_mock() # test RPC call with running processes proc_1 = Mock(**{'stopped.return_value': False}) proc_2 = Mock(namespec='proc2', **{'stopped.return_value': True}) rpc._get_application_process.return_value = (Mock(**{'processes.values.return_value': [proc_1, proc_2]}), None) with pytest.raises(RPCError) as exc: rpc.stop_process('appli_1') assert exc.value.args == (Faults.NOT_RUNNING, 'proc2') assert mocked_check.call_args_list == [call()] assert mocked_stop.call_count == 0 assert mocked_progress.call_count == 0 mocked_check.reset_mock() # test RPC call with stopped processes proc_1 = Mock(namespec='proc1', **{'running.return_value': True, 'stopped.return_value':
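# Several tests above exercise the same deferred-result convention: when a start/stop job
# cannot finish synchronously, the RPC returns a callable that keeps answering NOT_DONE_YET
# while the Starter/Stopper reports the job in progress, and only raises or returns a final
# value once it has settled. Below is a stripped-down sketch of that convention with mocked
# internals; stop_process_deferred is illustrative rather than the Supvisors code, and the
# imports assume supervisor's usual locations for these symbols.
from unittest.mock import Mock
from supervisor.http import NOT_DONE_YET
from supervisor.xmlrpc import Faults, RPCError

def stop_process_deferred(stopper, process):
    """Return True if the stop completed, else a callable polled by supervisord."""
    if stopper.stop_process(process):
        return True

    def onwait():
        if stopper.in_progress():
            return NOT_DONE_YET                       # keep polling
        if not process.stopped():
            raise RPCError(Faults.ABNORMAL_TERMINATION, process.namespec)
        return True                                   # job settled, process stopped

    return onwait

stopper = Mock(**{'stop_process.return_value': False, 'in_progress.return_value': True})
process = Mock(namespec='appli:proc', **{'stopped.return_value': False})
deferred = stop_process_deferred(stopper, process)
assert deferred() is NOT_DONE_YET                     # still in progress
stopper.in_progress.return_value = False
process.stopped.return_value = True
assert deferred()                                     # now reports success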
in training split, including label. -Future: is it useful to specify the size of only test for unsupervised learning? """ samples = JSONField() sizes = JSONField() supervision = CharField() has_test = BooleanField() has_validation = BooleanField() bin_count = IntegerField(null=True) featureset = ForeignKeyField(Featureset, backref='splitsets') label = ForeignKeyField(Label, deferrable='INITIALLY DEFERRED', null=True, backref='splitsets') def from_featureset( featureset_id:int , label_id:int = None , size_test:float = None , size_validation:float = None , bin_count:float = None ): if (size_test is not None): if (size_test <= 0.0) or (size_test >= 1.0): raise ValueError("\nYikes - `size_test` must be between 0.0 and 1.0\n") # Don't handle `has_test` here. Need to check label first. if (size_validation is not None) and (size_test is None): raise ValueError("\nYikes - you specified a `size_validation` without setting a `size_test`.\n") if (size_validation is not None): if (size_validation <= 0.0) or (size_validation >= 1.0): raise ValueError("\nYikes - `size_validation` must be between 0.0 and 1.0\n") sum_test_val = size_validation + size_test if sum_test_val >= 1.0: raise ValueError("\nYikes - Sum of `size_test` + `size_validation` must be between 0.0 and 1.0 to leave room for training set.\n") """ Have to run train_test_split twice and do the math to figure out the size of 2nd split. Let's say I want {train:0.67, validation:0.13, test:0.20} The first test_size is 20% which leaves 80% of the original data to be split into validation and training data. (1.0/(1.0-0.20))*0.13 = 0.1625 """ pct_for_2nd_split = (1.0/(1.0-size_test))*size_validation has_validation = True else: has_validation = False f = Featureset.get_by_id(featureset_id) f_cols = f.columns # Feature data to be split. d = f.dataset arr_f = Dataset.to_numpy(id=d.id, columns=f_cols) """ Simulate an index to be split alongside features and labels in order to keep track of the samples being used in the resulting splits. """ row_count = arr_f.shape[0] arr_idx = np.arange(row_count) samples = {} sizes = {} if label_id is None: has_test = False supervision = "unsupervised" l = None if (size_test is not None) or (size_validation is not None): raise ValueError(dedent(""" Yikes - Unsupervised Featuresets support neither test nor validation splits. Set both `size_test` and `size_validation` as `None` for this Featureset. """)) else: indices_lst_train = arr_idx.tolist() samples["train"] = indices_lst_train sizes["train"] = {"percent": 1.00, "count": row_count} elif (label_id is not None): # We don't need to prevent duplicate Label/Featureset combos because Splits generate different samples each time. l = Label.get_by_id(label_id) # Check number of samples in Label vs Featureset, because they can come from different Datasets. l_dataset_id = l.dataset.id l_length = Dataset.Tabular.get_main_file(l_dataset_id).shape['rows'] if (l_dataset_id != d.id): if (d.dataset_type == 'tabular'): f_length = Dataset.Tabular.get_main_file(d.id).shape['rows'] elif (d.dataset_type == 'image'): f_length = f.dataset.file_count # Separate `if` to compare them. if (l_length != f_length): raise ValueError("\nYikes - The Datasets of your Label and Featureset do not contain the same number of samples.\n") if size_test is None: size_test = 0.30 has_test = True supervision = "supervised" label_array = l.to_numpy() # check for OHE cols and reverse them so we can still stratify.
if (label_array.shape[1] > 1): encoder = OneHotEncoder(sparse=False) label_array = encoder.fit_transform(label_array) label_array = np.argmax(label_array, axis=1) # argmax flattens the array, so reshape it to array of arrays. count = label_array.shape[0] l_cat_shaped = label_array.reshape(count, 1) # OHE dtype returns as int64 label_dtype = label_array.dtype stratifier1, bin_count = Splitset.stratifier_by_dtype_binCount( label_dtype = label_dtype, label_array = label_array, bin_count = bin_count ) """ - `sklearn.model_selection.train_test_split` = https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html - `shuffle` happens before the split. Although preserves a df's original index, we don't need to worry about that because we are providing our own indices. - Don't include the Dataset.Image.featureset pixel arrays in stratification. """ if (d.dataset_type == 'tabular'): features_train, features_test, labels_train, labels_test, indices_train, indices_test = train_test_split( arr_f, label_array, arr_idx , test_size = size_test , stratify = stratifier1 , shuffle = True ) if (size_validation is not None): stratifier2, bin_count = Splitset.stratifier_by_dtype_binCount( label_dtype = label_dtype, label_array = labels_train, #This split is different from stratifier1. bin_count = bin_count ) features_train, features_validation, labels_train, labels_validation, indices_train, indices_validation = train_test_split( features_train, labels_train, indices_train , test_size = pct_for_2nd_split , stratify = stratifier2 , shuffle = True ) indices_lst_validation = indices_validation.tolist() samples["validation"] = indices_lst_validation elif (d.dataset_type == 'image'): # Features not involved. labels_train, labels_test, indices_train, indices_test = train_test_split( label_array, arr_idx , test_size = size_test , stratify = stratifier1 , shuffle = True ) if (size_validation is not None): stratifier2, bin_count = Splitset.stratifier_by_dtype_binCount( label_dtype = label_dtype, label_array = labels_train, #This split is different from stratifier1. 
bin_count = bin_count ) labels_train, labels_validation, indices_train, indices_validation = train_test_split( labels_train, indices_train , test_size = pct_for_2nd_split , stratify = stratifier2 , shuffle = True ) indices_lst_validation = indices_validation.tolist() samples["validation"] = indices_lst_validation indices_lst_train, indices_lst_test = indices_train.tolist(), indices_test.tolist() samples["train"] = indices_lst_train samples["test"] = indices_lst_test size_train = 1.0 - size_test if size_validation is not None: size_train -= size_validation count_validation = len(indices_lst_validation) sizes["validation"] = {"percent": size_validation, "count": count_validation} count_test = len(indices_lst_test) count_train = len(indices_lst_train) sizes["test"] = {"percent": size_test, "count": count_test} sizes["train"] = {"percent": size_train, "count": count_train} s = Splitset.create( featureset = f , label = l , samples = samples , sizes = sizes , supervision = supervision , has_test = has_test , has_validation = has_validation , bin_count = bin_count ) return s def to_pandas( id:int , splits:list = None , include_label:bool = None , include_featureset:bool = None , feature_columns:list = None ): splits = listify(splits) feature_columns = listify(feature_columns) split_frames = Splitset.get_splits( id = id , numpy_or_pandas = 'pandas' , splits = splits , include_label = include_label , include_featureset = include_featureset , feature_columns = feature_columns ) return split_frames def to_numpy( id:int , splits:list = None , include_label:bool = None , include_featureset:bool = None , feature_columns:list = None ): splits = listify(splits) feature_columns = listify(feature_columns) split_arrs = Splitset.get_splits( id = id , numpy_or_pandas = 'numpy' , splits = splits , include_label = include_label , include_featureset = include_featureset , feature_columns = feature_columns ) return split_arrs def get_splits(id:int , numpy_or_pandas:str # Machine set, so don't validate. , splits:list = None , include_label:bool = None # Unsupervised can't be True. , include_featureset:bool = None , feature_columns:list = None ): """ Future: Optimize! - Worried it's holding all dataframes and arrays in memory. - Generators to access one [key][set] at a time? """ s = Splitset.get_by_id(id) splits = listify(splits) feature_columns = listify(feature_columns) splits = list(s.samples.keys()) supervision = s.supervision featureset = s.featureset split_frames = {} # Future: Optimize (switch to generators for memory usage). # Here, split_names are: train, test, validation. # There are always featureset. It's just if you want to include them or not. # Saves memory when you only want Labels by split. if (include_featureset is None): include_featureset = True if (supervision == "unsupervised"): if (include_label is None): include_label = False elif (include_label == True): raise ValueError("\nYikes - `include_label == True` but `Splitset.supervision=='unsupervised'`.\n") elif (supervision == "supervised"): if (include_label is None): include_label = True if ((include_featureset == False) and (include_label == False)): raise ValueError("\nYikes - Both `include_featureset` and `include_label` cannot be False.\n") if ((feature_columns is not None) and (include_featureset != True)): raise ValueError("\nYikes - `feature_columns` must be None if `include_label==False`.\n") for split_name in splits: # Placeholder for the frames/arrays. 
split_frames[split_name] = {} # Fetch the sample indices for the split split_samples = s.samples[split_name] if (include_featureset == True): if (numpy_or_pandas == 'numpy'): ff = featureset.to_numpy(samples=split_samples, columns=feature_columns) elif (numpy_or_pandas == 'pandas'): ff = featureset.to_pandas(samples=split_samples, columns=feature_columns) split_frames[split_name]["features"] = ff if (include_label == True): l = s.label if (numpy_or_pandas == 'numpy'): lf = l.to_numpy(samples=split_samples) elif (numpy_or_pandas == 'pandas'): lf = l.to_pandas(samples=split_samples) split_frames[split_name]["labels"] = lf return split_frames def label_values_to_bins(array_to_bin:object, bin_count:int): """ Overwites continuous Label values with bin numbers for statification & folding. Switched to `pd.qcut` because `np.digitize` never had enough samples in the up the leftmost/right bin. """ # Make 1D for qcut. array_to_bin = array_to_bin.flatten() # For really unbalanced labels, I ran into errors where bin boundaries would be duplicates all the way down to 2 bins. # Setting `duplicates='drop'` to address this. bin_numbers = pd.qcut(x=array_to_bin, q=bin_count, labels=False, duplicates='drop') # Convert 1D array back to 2D for the rest of the program. bin_numbers = np.reshape(bin_numbers, (-1, 1)) return bin_numbers def stratifier_by_dtype_binCount(label_dtype:object, label_array:object, bin_count:int=None): # Based on the dtype and bin_count determine how to stratify. # Automatically bin floats. if np.issubdtype(label_dtype, np.floating): if (bin_count is None): bin_count = 3 stratifier = Splitset.label_values_to_bins(array_to_bin=label_array, bin_count=bin_count) # Allow ints to pass either binned or unbinned. elif ( (np.issubdtype(label_dtype, np.signedinteger)) or (np.issubdtype(label_dtype, np.unsignedinteger)) ): if (bin_count is not None): stratifier = Splitset.label_values_to_bins(array_to_bin=label_array, bin_count=bin_count) elif (bin_count is None): # Assumes the int is for classification. stratifier = label_array # Reject binned objs. elif (np.issubdtype(label_dtype, np.number) == False): if (bin_count is not None): raise ValueError(dedent(""" Yikes - Your Label is not numeric (neither `np.floating`, `np.signedinteger`, `np.unsignedinteger`). Therefore, you cannot provide a value for `bin_count`. \n""")) elif (bin_count is None): stratifier = label_array return stratifier, bin_count def make_foldset( id:int , fold_count:int = None , bin_count:int = None ): foldset = Foldset.from_splitset( splitset_id = id , fold_count = fold_count , bin_count = bin_count ) return foldset def make_encoderset( id:int , encoder_count:int = 0 , description:str = None ): e = Encoderset.from_splitset( splitset_id = id , encoder_count = 0 , description = description ) return e class Foldset(BaseModel): """ - Contains aggregate summary statistics and evaluate metrics for all Folds. """ fold_count = IntegerField() random_state = IntegerField() bin_count = IntegerField(null=True) # For stratifying continuous features. 
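# --- Illustrative aside (not library code) --------------------------------------------
# What label_values_to_bins/stratifier_by_dtype_binCount above achieve: a continuous
# label is mapped to quantile bin ids with pd.qcut so it can be passed to `stratify=`.
# (The class above additionally reshapes the bin ids back to shape (n, 1) for its own
# bookkeeping; that step is omitted here.)
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

y = np.random.rand(90, 1)                                              # continuous label
bins = pd.qcut(x=y.flatten(), q=3, labels=False, duplicates='drop')    # bin ids 0..2 per sample

X = np.random.rand(90, 4)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, stratify=bins)
# each split now contains roughly equal shares of the low/middle/high label range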
#ToDo: max_samples_per_bin = IntegerField() #ToDo: min_samples_per_bin = IntegerField() splitset = ForeignKeyField(Splitset, backref='foldsets') def from_splitset( splitset_id:int , fold_count:int = None , bin_count:int = None ): splitset = Splitset.get_by_id(splitset_id) new_random = False while new_random == False: random_state = random.randint(0, 4294967295) #2**32 - 1 inclusive matching_randoms = splitset.foldsets.select().where(Foldset.random_state==random_state) count_matches = matching_randoms.count() if count_matches == 0: new_random = True if (fold_count is None): fold_count = 5 # More likely than 4 to be evenly divisible. else: if (fold_count < 2): raise ValueError(dedent(f""" Yikes - Cross validation requires multiple folds. But you provided `fold_count`: <{fold_count}>. """)) elif (fold_count == 2): print("\nWarning - Instead of two folds, why not just use a validation split?\n") # Get the training indices. The actual values of the features don't matter, only label values needed for stratification. arr_train_indices = splitset.samples["train"] arr_train_labels = splitset.label.to_numpy(samples=arr_train_indices) # If the Labels are binned *overwite* the values w bin numbers. Otherwise untouched. label_dtype = arr_train_labels.dtype #
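# --- Illustrative aside (assumption, not the file's own fold code) --------------------
# The fold construction itself is cut off above. As a hedged sketch, the standard way to
# realize what the comments describe (stratified folds over the training samples, using
# only the -- possibly binned -- label values) is sklearn's StratifiedKFold. Names and
# dict keys below are illustrative only.
import numpy as np
from sklearn.model_selection import StratifiedKFold

train_indices = np.arange(200)                           # stands in for splitset.samples["train"]
train_labels = np.random.randint(0, 3, size=200)         # stands in for (binned) training labels

skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
folds = []
for pos_train, pos_validation in skf.split(train_indices, train_labels):
    folds.append({
        "train": train_indices[pos_train].tolist(),       # fold-train sample indices
        "validation": train_indices[pos_validation].tolist(),
    })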
TABLE IF EXISTS %s' % table_name) access.execute_query(CreateTableQuery(if_not_exists=True, table_or_subquery=table_name, column_name=(['pk'] if include_pk else []) + columns, column_constraint=column_constraint, table_constraint="PRIMARY KEY (pk)" if include_pk else None)) with Process('Saving %s' % self._name, stop-start) as p: for i in range(start, stop, n): access.execute_query(InsertQuery(table_or_subquery=table_name, replace=True, column_name=['pk' if include_pk else 'rowid'] + columns, update_value='?' + (',?'*(len(columns)))), format_gen(p)) i_global += n access.write_db('PRAGMA journal_mode=DELETE') def export_files(self, path, columns=None, stop=None, start=0, filename_column=None, metadata_file='.xlsx', determinist=True, ncore=1, overwrite=True): import pandas # --- HANDLE PARAMETERS --- # Create path os.makedirs(path, exist_ok=True) # Handle columns exported_columns = set() columns_mapping = OrderedDict() single_column = False if isinstance(columns, (list, tuple, set)): columns = OrderedDict() for _ in columns: columns[_] = _ if isinstance(columns, str): if columns not in self.columns_name(): raise ValueError('Unknown column %s' % columns) columns_mapping = {'': columns} exported_columns = {columns} single_column = True elif isinstance(columns, DSColumn): if columns.dataset is not self: raise ValueError('%s is not a column of %s' % (columns.name, self.dataset_name)) columns_mapping = {'': columns.name} exported_columns = {columns.name} single_column = True elif is_dict(columns): for c_name, c in columns.items(): if not isinstance(c_name, str): raise ValueError('columns key should be str (received: %s)' % type(c_name).__name__) if isinstance(c, DSColumn): if c.dataset is not self: raise ValueError('%s is not a column of %s' % (c.name, self.dataset_name)) c = c.name if isinstance(c, str): if c not in self.columns_name(): raise ValueError('Unknown column %s' % c) exported_columns.add(c) columns_mapping[c_name] = c else: raise ValueError('Invalid columns value. Expected type is str or DSColumn, received %s.' % type(c).__name__) # Handle filename_column if filename_column is None: for n in ('name', 'filename'): if n in self.columns_name(): filename_column = n break if filename_column is None: filename_column = 'pk' exported_columns.add(filename_column) # Handle metadata_file metadata_sheet = self.dataset_name if ':' in metadata_file: metadata_file, metadata_sheet = metadata_file.rsplit(':', 1) if metadata_file.startswith('.'): f = list(exported_columns)[0] if single_column else self.dataset_name metadata_file = f + metadata_file metadata_file = join(path, metadata_file) if exists(metadata_file): if overwrite: os.remove(metadata_file) else: raise RuntimeError('%s already exists.' 
% metadata_file) meta_name_column = filename_column if filename_column not in columns_mapping else None # Handle start stop start, stop = interval(size=self.size, start=start, stop=stop) with Process('Exporting '+self.dataset_name, total=stop-start, verbose=False) as p: from .dataset_generator import DataSetResult def write_cb(r: DataSetResult): metadata = OrderedDict() filename = r[0, filename_column] for c_to, c in columns_mapping.items(): c_data = r[0, c] col = self.column_by_name(c) c_path = path if single_column else join(path, c_to) meta = col.format.export_file(data=c_data, path=c_path, filename=filename, overwrite=overwrite) if meta is not None: metadata[c_to] = meta if metadata: meta_exists = exists(metadata_file) meta_columns = [] if meta_name_column is None else [meta_name_column] meta_columns += list(metadata.keys()) if metadata_file.endswith('xlsx'): if meta_exists: initial_df = pandas.read_excel(metadata_file) else: initial_df = pandas.DataFrame(columns=meta_columns) writer = pandas.ExcelWriter(metadata_file) initial_df.to_excel(writer, sheet_name=metadata_sheet, index=False) if meta_name_column is not None: metadata[meta_name_column] = filename df = pandas.DataFrame(metadata) df.to_excel(writer, sheet_name=metadata_sheet, index=False, startrow=r.start_id) writer.save() elif metadata_file.endswith('csv'): if not meta_exists: pandas.DataFrame(columns=meta_columns).to_csv(metadata_file, header=True, mode='w') if meta_name_column is not None: metadata[meta_name_column] = filename pandas.DataFrame(metadata).to_csv(metadata_file, header=False, mode='a') p.update(1) self.export(cb=write_cb, start=start, stop=stop, columns=exported_columns, n=1, determinist=determinist, ncore=ncore) def to_pytorch_dataset(self, ncore=1, intime='process'): from torch.utils.data import Dataset class CustomDataLoader(Dataset): def __init__(self, dataset): self._dataset = dataset self._gen = None def __len__(self): return self._dataset.size def __getitem__(self, item): if self._gen is None: self._gen = self._dataset.generator(ncore=ncore, intime=intime) try: return self._gen.next() except StopIteration: raise IndexError return CustomDataLoader(self) def cache(self, start=0, stop=None, columns=None, ncore=1, ondisk=None, name=None, overwrite='auto'): import tables from .dataset_generator import DataSetResult from collections import OrderedDict from ..j_utils.path import open_pytable start, stop = interval(self.size, start, stop) if columns is None: columns = self.columns_name() elif isinstance(columns, str): columns = [columns] else: columns = list(columns) if name is None: label = self._name name = 'cache' else: label = name hdf_t = None if ondisk: if isinstance(ondisk, bool): import tempfile ondisk = join(tempfile.gettempdir(), 'dataset_cache.hd5') + ':dataset' if isinstance(ondisk, str): ondisk_split = ondisk.split(':') if len(ondisk_split) == 1: path = ondisk_split[0] table_name = 'dataset' elif len(ondisk_split) == 2: path, table_name = ondisk_split else: raise ValueError('cache_path should be formated as "PATH:TABLE_NAME"') hdf_f = open_pytable(path) if not table_name.startswith('/'): table_name = '/' + table_name table_path, table_name = table_name.rsplit('/', maxsplit=1) if not table_path: table_path = '/' else: raise ValueError('cache_path should be formated as "PATH:TABLE_NAME"') try: hdf_t = hdf_f.get_node(table_path, table_name, 'Table') erase_table = False if overwrite and isinstance(overwrite, bool): erase_table = True elif overwrite == 'auto': if hdf_t.nrows != self.size: erase_table = 
True else: for col_name, hdf_col in hdf_t.description._v_colobjects.items(): col = self.column_by_name(col_name, raise_on_unknown=False) if col is None or hdf_col.dtype.shape != col.shape or hdf_col.dtype.base != col.dtype: erase_table = True break if erase_table: hdf_f.remove_node(table_path, table_name) hdf_t = None except tables.NoSuchNodeError: pass else: hdf_f = None hdf_i = 0 while hdf_f is None: try: hdf_f = tables.open_file("/tmp/tmpEmptyHDF_%i.h5" % hdf_i, "a", driver="H5FD_CORE", driver_core_backing_store=0) except tables.HDF5ExtError: hdf_i += 1 table_path = '/' table_name = 'dataset' if hdf_t is None: desc = OrderedDict() for i, c in enumerate(['pk']+columns): col = self.column_by_name(c) if col.shape == (): if col.is_text or col.dtype in ('O', object): desc[col.name] = tables.StringCol(1024, pos=i) else: desc[col.name] = tables.Col.from_dtype(col.dtype, pos=i) else: desc[col.name] = tables.Col.from_sctype(col.dtype.type, col.shape, pos=i) hdf_t = hdf_f.create_table(table_path, table_name, description=desc, expectedrows=self.size, createparents=True, track_times=False) chunck_size = min(stop - start, hdf_t.chunkshape[0]) if ncore > 1: with Process('Allocating %s' % label, stop-start, verbose=False) as p: empty = DataSetResult.create_empty(dataset=self, n=1).to_row_list()[0] for i in range(0, stop-start): hdf_t.append([empty]*min(chunck_size, stop-i)) p.step = i with Process('Caching %s' % label, stop-start, verbose=False) as p: from .dataset_generator import DataSetResult def write_back(r: DataSetResult): hdf_t.modify_rows(start=r.start_id-start, stop=r.stop_id-start, rows=r.to_row_list()) p.update(r.size) self.export(write_back, n=chunck_size, start=start, stop=stop, columns=columns, ncore=ncore) else: with Process('Caching %s' % label, stop-start, verbose=False) as p: for r in self.generator(n=chunck_size, start=start, stop=stop, determinist=True, columns=columns): hdf_t.append(r.to_row_list()) p.update(r.size) hdf_f.flush() from .datasets_core import PyTableDataSet hdfDataset = PyTableDataSet(hdf_t, name=name) for c in columns: hdfDataset.col[c].format = self.col[c].format hdfDataset.col[c]._is_text = self.col[c].is_text # Bof... return hdfDataset # --- Global operator --- def sum(self, columns=None, start=0, stop=None, ncore=1, n=1, determinist=True): single_column = isinstance(columns, (str, DSColumn)) columns = self.interpret_columns(columns) for c in columns: c = self.column_by_name(c) if not np.issubdtype(c.dtype, np.number): raise ValueError('Only numeric columns can be summed. (%s is not numeric, dtype: %s).' % (c.name, c.dtype)) from .dataset_generator import DataSetResult result = DataSetResult.create_empty(n=1, dataset=self, columns=columns) def write_cb(r): for c in r.keys(): result[0, c] += r[:, c].sum(axis=0) result.trace.affiliate_parent_trace(r.trace) self.export(write_cb, columns=columns, n=n, start=start, stop=stop, ncore=ncore, determinist=determinist) return result[columns[0]] if single_column else result def mean(self, columns=None, start=0, stop=None, std=False, ncore=1, n=1, determinist=True): single_column = isinstance(columns, (str, DSColumn)) columns = self.interpret_columns(columns) for c in columns: c = self.column_by_name(c) if not np.issubdtype(c.dtype, np.number): raise ValueError('Only numeric columns can be averaged. (%s is not numeric, dtype: %s).' 
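# --- Illustrative aside (not library code) --------------------------------------------
# Stripped-down sketch of the PyTables pattern used by cache() above: build a table
# description from column dtypes/shapes, create the table, then append rows in chunks.
# File name, column names and shapes below are made up.
import numpy as np
import tables
from collections import OrderedDict

h5 = tables.open_file("tmp_cache_example.h5", "a",
                      driver="H5FD_CORE", driver_core_backing_store=0)  # in-memory file

desc = OrderedDict()
desc["pk"] = tables.Col.from_dtype(np.dtype("int64"), pos=0)         # scalar column
desc["name"] = tables.StringCol(64, pos=1)                           # text column
desc["image"] = tables.Col.from_sctype(np.float32, (8, 8), pos=2)    # shaped column

table = h5.create_table("/", "dataset", description=desc, expectedrows=100)
rows = [(i, ("sample_%d" % i).encode(), np.zeros((8, 8), np.float32)) for i in range(100)]
table.append(rows)   # cache() does this chunk by chunk from a generator instead
table.flush()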
% (c.name, c.dtype)) start, stop = interval(self.size, start, stop) from .dataset_generator import DataSetResult result = DataSetResult.create_from_data({c: np.zeros((2,)+self.col[c].shape, np.float) for c in columns}) if std: def write_cb(r): for c in columns: result[0, c] += r[:, c].sum(axis=0) / (stop - start) result[1, c] += np.square(r[:, c]).sum(axis=0) / (stop - start) result.trace.affiliate_parent_trace(r.trace) else: def write_cb(r): for c in columns: result[0, c] += r[:, c].sum(axis=0)/(stop-start) result.trace.affiliate_parent_trace(r.trace) self.export(write_cb, columns=columns, n=n, start=start, stop=stop, ncore=ncore, determinist=determinist) if std: for c in result.keys(): result[1, c] = np.sqrt(result[1, c]-np.square(result[0, c])) return result[:, columns[0]] if single_column else result return result[0, columns[0]] if single_column else result def std(self, columns=None, start=0, stop=None, ncore=1, n=1, determinist=True): single_column = isinstance(columns, (str, DSColumn)) columns = self.interpret_columns(columns) result = self.mean(columns=columns, start=start, stop=stop, ncore=ncore, n=n, determinist=determinist, std=True) return result[1, columns[0]] if single_column else result.truncate(start=1) def confusion_matrix(self, pred, true, weight=None, label=None, rowwise=False, start=0, stop=None, ncore=1, n=1, determinist=True): from ..j_utils.math import ConfMatrix # Handle pred, true and weight if isinstance(pred, DSColumn): if pred.dataset is not self: raise ValueError("%s is not a column of %s." % (pred.name, self.dataset_name)) elif isinstance(pred, str): pred = self.column_by_name(pred) else: raise ValueError("Invalid type for pred. (Expected type: str or DSColumn, received: %s)" % type(pred)) pred_shape = pred.shape if isinstance(true, DSColumn): if true.dataset is not self: raise ValueError("%s is not a column of %s." % (true.name, self.dataset_name)) elif isinstance(true, str): true = self.column_by_name(true) elif isinstance(true, np.ndarray): pass else: raise ValueError("Invalid type for true. (Expected type: str or DSColumn, received: %s)" % type(true)) true_shape = true.shape if weight is not None: if isinstance(weight, DSColumn): if weight.dataset is not self: raise ValueError("%s is not a column of %s." % (weight.name, self.dataset_name)) elif isinstance(weight, str): weight = self.column_by_name(weight) elif isinstance(weight,
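# --- Illustrative aside (not library code) --------------------------------------------
# The mean(std=True) path above accumulates E[x] and E[x**2] chunk by chunk and finishes
# with std = sqrt(E[x**2] - E[x]**2). A quick numpy check of that identity:
import numpy as np

data = np.random.rand(1000, 4)
n = data.shape[0]
mean_acc = np.zeros(4)
sq_acc = np.zeros(4)
for chunk in np.array_split(data, 10):       # stands in for the dataset generator
    mean_acc += chunk.sum(axis=0) / n
    sq_acc += np.square(chunk).sum(axis=0) / n
std = np.sqrt(sq_acc - np.square(mean_acc))

assert np.allclose(mean_acc, data.mean(axis=0))
assert np.allclose(std, data.std(axis=0))    # population std, matching the formula above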
: 2D array Specify the matrix that will be used to multiply the vector of subsystem outputs to obtain the vector of subsystem inputs. """ # Make sure the connection map is the right size if connect_map.shape != self.connect_map.shape: ValueError("Connection map is not the right shape") self.connect_map = connect_map def set_input_map(self, input_map): """Set the input map for an interconnected I/O system. Parameters ---------- input_map : 2D array Specify the matrix that will be used to multiply the vector of system inputs to obtain the vector of subsystem inputs. These values are added to the inputs specified in the connection map. """ # Figure out the number of internal inputs ninputs = sum(sys.ninputs for sys in self.syslist) # Make sure the input map is the right size if input_map.shape[0] != ninputs: ValueError("Input map is not the right shape") self.input_map = input_map self.ninputs = input_map.shape[1] def set_output_map(self, output_map): """Set the output map for an interconnected I/O system. Parameters ---------- output_map : 2D array Specify the matrix that will be used to multiply the vector of subsystem outputs to obtain the vector of system outputs. """ # Figure out the number of internal inputs and outputs ninputs = sum(sys.ninputs for sys in self.syslist) noutputs = sum(sys.noutputs for sys in self.syslist) # Make sure the output map is the right size if output_map.shape[1] == noutputs: # For backward compatibility, add zeros to the end of the array output_map = np.concatenate( (output_map, np.zeros((output_map.shape[0], ninputs))), axis=1) if output_map.shape[1] != noutputs + ninputs: ValueError("Output map is not the right shape") self.output_map = output_map self.noutputs = output_map.shape[0] def input_output_response(sys, T, U=0., X0=0, params={}, method='RK45', return_x=False, squeeze=True): """Compute the output response of a system to a given input. Simulate a dynamical system with a given input and return its output and state values. Parameters ---------- sys: InputOutputSystem Input/output system to simulate. T: array-like Time steps at which the input is defined; values must be evenly spaced. U: array-like or number, optional Input array giving input at each time `T` (default = 0). X0: array-like or number, optional Initial condition (default = 0). return_x : bool, optional If True, return the values of the state at each time (default = False). squeeze : bool, optional If True (default), squeeze unused dimensions out of the output response. In particular, for a single output system, return a vector of shape (nsteps) instead of (nsteps, 1). Returns ------- T : array Time values of the output. yout : array Response of the system. xout : array Time evolution of the state vector (if return_x=True) Raises ------ TypeError If the system is not an input/output system. 
ValueError If time step does not match sampling time (for discrete time systems) """ # Sanity checking on the input if not isinstance(sys, InputOutputSystem): raise TypeError("System of type ", type(sys), " not valid") # Compute the time interval and number of steps T0, Tf = T[0], T[-1] n_steps = len(T) # Check and convert the input, if needed # TODO: improve MIMO ninputs check (choose from U) if sys.ninputs is None or sys.ninputs == 1: legal_shapes = [(n_steps,), (1, n_steps)] else: legal_shapes = [(sys.ninputs, n_steps)] U = _check_convert_array(U, legal_shapes, 'Parameter ``U``: ', squeeze=False) # Check to make sure this is not a static function nstates = _find_size(sys.nstates, X0) if nstates == 0: # No states => map input to output u = U[0] if len(U.shape) == 1 else U[:, 0] y = np.zeros((np.shape(sys._out(T[0], X0, u))[0], len(T))) for i in range(len(T)): u = U[i] if len(U.shape) == 1 else U[:, i] y[:, i] = sys._out(T[i], [], u) if (squeeze): y = np.squeeze(y) if return_x: return T, y, [] else: return T, y # create X0 if not given, test if X0 has correct shape X0 = _check_convert_array(X0, [(nstates,), (nstates, 1)], 'Parameter ``X0``: ', squeeze=True) # Update the parameter values sys._update_params(params) # Create a lambda function for the right hand side u = sp.interpolate.interp1d(T, U, fill_value="extrapolate") def ivp_rhs(t, x): return sys._rhs(t, x, u(t)) # Perform the simulation if isctime(sys): if not hasattr(sp.integrate, 'solve_ivp'): raise NameError("scipy.integrate.solve_ivp not found; " "use SciPy 1.0 or greater") soln = sp.integrate.solve_ivp(ivp_rhs, (T0, Tf), X0, t_eval=T, method=method, vectorized=False) # Compute the output associated with the state (and use sys.out to # figure out the number of outputs just in case it wasn't specified) u = U[0] if len(U.shape) == 1 else U[:, 0] y = np.zeros((np.shape(sys._out(T[0], X0, u))[0], len(T))) for i in range(len(T)): u = U[i] if len(U.shape) == 1 else U[:, i] y[:, i] = sys._out(T[i], soln.y[:, i], u) elif isdtime(sys): # Make sure the time vector is uniformly spaced dt = T[1] - T[0] if not np.allclose(T[1:] - T[:-1], dt): raise ValueError("Parameter ``T``: time values must be " "equally spaced.") # Make sure the sample time matches the given time if (sys.dt is not True): # Make sure that the time increment is a multiple of sampling time # TODO: add back functionality for undersampling # TODO: this test is brittle if dt = sys.dt # First make sure that time increment is bigger than sampling time # if dt < sys.dt: # raise ValueError("Time steps ``T`` must match sampling time") # Check to make sure sampling time matches time increments if not np.isclose(dt, sys.dt): raise ValueError("Time steps ``T`` must be equal to " "sampling time") # Compute the solution soln = sp.optimize.OptimizeResult() soln.t = T # Store the time vector directly x = [float(x0) for x0 in X0] # State vector (store as floats) soln.y = [] # Solution, following scipy convention y = [] # System output for i in range(len(T)): # Store the current state and output soln.y.append(x) y.append(sys._out(T[i], x, u(T[i]))) # Update the state for the next iteration x = sys._rhs(T[i], x, u(T[i])) # Convert output to numpy arrays soln.y = np.transpose(np.array(soln.y)) y = np.transpose(np.array(y)) # Mark solution as successful soln.success = True # No way to fail else: # Neither ctime or dtime?? 
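# --- Illustrative aside (not library code) --------------------------------------------
# Self-contained sketch of the continuous-time branch above: interpolate the sampled
# input over T and integrate the right-hand side with scipy's solve_ivp. The system here
# is a toy first-order lag, not a python-control InputOutputSystem.
import numpy as np
import scipy as sp
import scipy.integrate
import scipy.interpolate

T = np.linspace(0, 10, 101)
U = np.sin(T)                                            # input samples on the time grid
u = sp.interpolate.interp1d(T, U, fill_value="extrapolate")

def ivp_rhs(t, x):
    return -x + u(t)                                     # xdot = -x + u(t)

soln = sp.integrate.solve_ivp(ivp_rhs, (T[0], T[-1]), [0.0],
                              t_eval=T, method='RK45', vectorized=False)
y = soln.y[0]                                            # output taken as the single state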
raise TypeError("Can't determine system type") # Get rid of extra dimensions in the output, of desired if (squeeze): y = np.squeeze(y) if return_x: return soln.t, y, soln.y else: return soln.t, y def find_eqpt(sys, x0, u0=[], y0=None, t=0, params={}, iu=None, iy=None, ix=None, idx=None, dx0=None, return_y=False, return_result=False, **kw): """Find the equilibrium point for an input/output system. Returns the value of an equlibrium point given the initial state and either input value or desired output value for the equilibrium point. Parameters ---------- x0 : list of initial state values Initial guess for the value of the state near the equilibrium point. u0 : list of input values, optional If `y0` is not specified, sets the equilibrium value of the input. If `y0` is given, provides an initial guess for the value of the input. Can be omitted if the system does not have any inputs. y0 : list of output values, optional If specified, sets the desired values of the outputs at the equilibrium point. t : float, optional Evaluation time, for time-varying systems params : dict, optional Parameter values for the system. Passed to the evaluation functions for the system as default values, overriding internal defaults. iu : list of input indices, optional If specified, only the inputs with the given indices will be fixed at the specified values in solving for an equilibrium point. All other inputs will be varied. Input indices can be listed in any order. iy : list of output indices, optional If specified, only the outputs with the given indices will be fixed at the specified values in solving for an equilibrium point. All other outputs will be varied. Output indices can be listed in any order. ix : list of state indices, optional If specified, states with the given indices will be fixed at the specified values in solving for an equilibrium point. All
#!/usr/bin/env python # # Copyright 2019 DFKI GmbH. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the # following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN # NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE # USE OR OTHER DEALINGS IN THE SOFTWARE. """ Functions for retargeting based on the paper "Using an Intermediate Skeleton and Inverse Kinematics for Motion Retargeting" by Monzani et al. See: http://www.vis.uni-stuttgart.de/plain/vdl/vdl_upload/91_35_retargeting%20monzani00using.pdf """ import numpy as np import math from transformations import quaternion_matrix, quaternion_multiply, quaternion_about_axis, quaternion_from_matrix from .constants import OPENGL_UP_AXIS from .utils import normalize, align_axis, find_rotation_between_vectors, align_root_translation, to_local_cos, get_quaternion_rotation_by_name, apply_additional_rotation_on_frames, project_vector_on_axis, quaternion_from_vector_to_vector from ..animation_data.skeleton_models import JOINT_CHILD_MAP def create_local_cos_map_using_child_map(skeleton, up_vector, x_vector, child_map=None): joint_cos_map = dict() for j in list(skeleton.nodes.keys()): joint_cos_map[j] = dict() joint_cos_map[j]["y"] = up_vector joint_cos_map[j]["x"] = x_vector if j == skeleton.root: joint_cos_map[j]["x"] = (-np.array(x_vector)).tolist() else: o = np.array([0, 0, 0]) if child_map is not None and j in child_map: child_name = child_map[j] node = skeleton.nodes[child_name] o = np.array(node.offset) elif len(skeleton.nodes[j].children) > 0: node = skeleton.nodes[j].children[0] o = np.array(node.offset) o = normalize(o) if sum(o * o) > 0: joint_cos_map[j]["y"] = o return joint_cos_map def create_local_cos_map(skeleton, up_vector, x_vector): joint_cos_map = dict() for j in list(skeleton.nodes.keys()): joint_cos_map[j] = dict() joint_cos_map[j]["y"] = up_vector joint_cos_map[j]["x"] = x_vector if j == skeleton.root: joint_cos_map[j]["x"] = (-np.array(x_vector)).tolist() return joint_cos_map def get_body_x_axis(skeleton): rh = skeleton.skeleton_model["joints"]["right_hip"] lh = skeleton.skeleton_model["joints"]["left_hip"] return get_body_axis(skeleton, rh, lh) def get_body_y_axis(skeleton): a = skeleton.skeleton_model["joints"]["pelvis"] b = skeleton.skeleton_model["joints"]["head"] return get_body_axis(skeleton, a,b) def get_quaternion_to_axis(skeleton, joint_a, joint_b, axis): ident_f = skeleton.identity_frame ap = skeleton.nodes[joint_a].get_global_position(ident_f) bp = skeleton.nodes[joint_b].get_global_position(ident_f) delta = bp - ap delta /= np.linalg.norm(delta) return quaternion_from_vector_to_vector(axis, delta) def get_body_axis(skeleton, joint_a, joint_b, project=True): ident_f = 
skeleton.identity_frame ap = skeleton.nodes[joint_a].get_global_position(ident_f) bp = skeleton.nodes[joint_b].get_global_position(ident_f) delta = bp - ap m = np.linalg.norm(delta) if m != 0: delta /= m if project: projection = project_vector_on_axis(delta) return projection / np.linalg.norm(projection) else: return delta else: return None def rotate_axes(cos, q): m = quaternion_matrix(q)[:3, :3] for key, a in list(cos.items()): cos[key] = np.dot(m, a) cos[key] = normalize(cos[key]) return cos def get_child_joint(skeleton, inv_joint_map, node_name): """ Warning output is random if there are more than one child joints and the value is not specified in the JOINT_CHILD_MAP """ child_node = None if len(skeleton.nodes[node_name].children) > 0: child_node = skeleton.nodes[node_name].children[-1] if node_name in inv_joint_map: joint_name = inv_joint_map[node_name] while joint_name in JOINT_CHILD_MAP: child_joint_name = JOINT_CHILD_MAP[joint_name] # check if child joint is mapped joint_key = None if child_joint_name in skeleton.skeleton_model["joints"]: joint_key = skeleton.skeleton_model["joints"][child_joint_name] if joint_key is not None and joint_key in skeleton.nodes: # return child joint child_node = skeleton.nodes[joint_key] return child_node else: #keep traversing until end of child map is reached if child_joint_name in JOINT_CHILD_MAP: joint_name = JOINT_CHILD_MAP[child_joint_name] else: break return child_node def create_local_cos_map_from_skeleton_axes_with_map(skeleton, flip=1.0, project=True): body_x_axis = get_body_x_axis(skeleton)*flip #print("body x axis", body_x_axis) body_y_axis = get_body_y_axis(skeleton) #print("body y axis", body_y_axis) inv_joint_map = dict((v,k) for k, v in skeleton.skeleton_model["joints"].items()) joint_cos_map = dict() for j in list(skeleton.nodes.keys()): joint_cos_map[j] = dict() joint_cos_map[j]["y"] = body_y_axis joint_cos_map[j]["x"] = body_x_axis node = skeleton.nodes[j] child_node = get_child_joint(skeleton, inv_joint_map, node.node_name) if child_node is None: continue y_axis = get_body_axis(skeleton, j, child_node.node_name, project) if y_axis is not None: joint_cos_map[j]["y"] = y_axis #check if the new y axis is similar to the x axis z_vector = np.cross(y_axis, joint_cos_map[j]["x"]) if np.linalg.norm(z_vector) == 0.0: joint_cos_map[j]["x"] = body_y_axis * -np.sum(joint_cos_map[j]["y"]) #check for angle and rotate q = get_quaternion_to_axis(skeleton, j, child_node.node_name, y_axis) rotate_axes(joint_cos_map[j], q) else: joint_cos_map[j]["y"] = None joint_cos_map[j]["x"] = None return joint_cos_map def align_root_joint(axes, global_src_x_vec, max_iter_count=10): # handle special case for the root joint # apply only the y axis rotation of the Hip to the Game_engine node not_aligned = True q = [1, 0, 0, 0] iter_count = 0 while not_aligned: qx, axes = align_axis(axes, "x", global_src_x_vec) # first find rotation to align x axis q = quaternion_multiply(qx, q) q = normalize(q) qy, axes = align_axis(axes, "y", OPENGL_UP_AXIS) # then add a rotation to let the y axis point up q = quaternion_multiply(qy, q) q = normalize(q) dot_y = np.dot(axes["y"], OPENGL_UP_AXIS) dot_y = min(1, max(dot_y, -1)) a_y = math.acos(dot_y) dot_x = np.dot(axes["x"], global_src_x_vec) dot_x = min(1, max(dot_x, -1)) a_x = math.acos(dot_x) iter_count += 1 not_aligned = a_y > 0.1 or a_x > 0.1 and iter_count < max_iter_count return q def align_joint(new_skeleton, free_joint_name, local_target_axes, global_src_up_vec, global_src_x_vec, joint_cos_map, apply_spine_fix=False): # 
first align the twist axis q, axes = align_axis(local_target_axes, "y", global_src_up_vec) q = normalize(q) # then align the swing axis qx, axes = align_axis(axes, "x", global_src_x_vec) q = quaternion_multiply(qx, q) q = normalize(q) # handle cases when twist axis alignment was lost dot = np.dot(axes["y"], global_src_up_vec) if dot <= -1: q180 = quaternion_about_axis(np.deg2rad(180), global_src_x_vec) q180 = normalize(q180) q = quaternion_multiply(q180, q) q = normalize(q) elif abs(dot) != 1.0: qy, axes = align_axis(axes, "y", global_src_up_vec) q = quaternion_multiply(qy, q) q = normalize(q) return q def find_rotation_analytically(new_skeleton, joint_name, global_src_up_vec, global_src_x_vec, frame, joint_cos_map, apply_spine_fix=False, apply_root_fix=False, max_iter_count=10): local_target_axes = joint_cos_map[joint_name] if joint_name == new_skeleton.root and apply_root_fix: q = align_root_joint(local_target_axes, global_src_x_vec, max_iter_count) else: q = align_joint(new_skeleton, joint_name, local_target_axes, global_src_up_vec, global_src_x_vec, joint_cos_map) return to_local_cos(new_skeleton, joint_name, frame, q) def create_correction_map(target_skeleton,target_to_src_joint_map, src_cos_map, target_cos_map): correction_map = dict() for target_name in target_to_src_joint_map: src_name = target_to_src_joint_map[target_name] if src_name in src_cos_map and target_name is not None and target_name in target_cos_map: src_zero_vector_y = src_cos_map[src_name]["y"] target_zero_vector_y = target_cos_map[target_name]["y"] src_zero_vector_x = src_cos_map[src_name]["x"] target_zero_vector_x = target_cos_map[target_name]["x"] if target_zero_vector_y is not None and src_zero_vector_y is not None: q = quaternion_from_vector_to_vector(target_zero_vector_y, src_zero_vector_y) q = normalize(q) m = quaternion_matrix(q)[:3, :3] target_zero_vector_x = normalize(np.dot(m, target_zero_vector_x)) qx = quaternion_from_vector_to_vector(target_zero_vector_x, src_zero_vector_x) q = quaternion_multiply(qx, q) q = normalize(q) correction_map[target_name] = q return correction_map class Retargeting(object): def __init__(self, src_skeleton, target_skeleton, target_to_src_joint_map, scale_factor=1.0, additional_rotation_map=None, constant_offset=None, place_on_ground=False, force_root_translation=False, ground_height=0): self.src_skeleton = src_skeleton self.target_skeleton = target_skeleton self.target_to_src_joint_map = target_to_src_joint_map self.src_to_target_joint_map = {v: k for k, v in list(self.target_to_src_joint_map.items())} self.scale_factor = scale_factor self.n_params = len(self.target_skeleton.animated_joints) * 4 + 3 self.ground_height = ground_height self.rotation_offsets = additional_rotation_map self.src_inv_joint_map = dict((v,k) for k, v in src_skeleton.skeleton_model["joints"].items()) self.src_child_map = dict() for src_name in self.src_skeleton.animated_joints: src_child = get_child_joint(self.src_skeleton, self.src_inv_joint_map, src_name) if src_child is not None: self.src_child_map[src_name] = src_child.node_name else: self.src_child_map[src_name] = None self.target_cos_map = create_local_cos_map_from_skeleton_axes_with_map(self.target_skeleton) self.src_cos_map = create_local_cos_map_from_skeleton_axes_with_map(self.src_skeleton) if "cos_map" in target_skeleton.skeleton_model: self.target_cos_map.update(target_skeleton.skeleton_model["cos_map"]) if "cos_map" in src_skeleton.skeleton_model: self.src_cos_map.update(src_skeleton.skeleton_model["cos_map"]) self.correction_map = dict() 
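# --- Illustrative aside (not library code) --------------------------------------------
# What rotate_axes above does, with toy values: rotate a joint's local x/y axes by a
# quaternion via its 3x3 rotation matrix, then renormalize.
import numpy as np
from transformations import quaternion_about_axis, quaternion_matrix

cos = {"x": np.array([1.0, 0.0, 0.0]),
       "y": np.array([0.0, 1.0, 0.0])}

q = quaternion_about_axis(np.deg2rad(90), [0, 0, 1])   # 90 degrees about the z axis
m = quaternion_matrix(q)[:3, :3]
for key, axis in list(cos.items()):
    rotated = np.dot(m, axis)
    cos[key] = rotated / np.linalg.norm(rotated)
# cos["x"] is now ~[0, 1, 0] and cos["y"] ~[-1, 0, 0]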
spine_joints = ["pelvis","spine", "spine_1","spine_2"] target_joint_map = self.target_skeleton.skeleton_model["joints"] self.target_spine_joints =[target_joint_map[j] for j in spine_joints if j in target_joint_map] self.correction_map = create_correction_map(self.target_skeleton, self.src_to_target_joint_map, self.src_cos_map, self.target_cos_map) self.constant_offset = constant_offset self.place_on_ground = place_on_ground self.force_root_translation = force_root_translation self.apply_spine_fix = "neck" in target_joint_map and self.src_skeleton.animated_joints != self.target_skeleton.animated_joints if "root" in target_joint_map: self.apply_root_fix = self.target_skeleton.skeleton_model["joints"]["root"] is not None # aligns root up axis with src skeleton up axis # make sure the src root joint in the target is not None target_root = self.target_skeleton.skeleton_model["joints"]["root"] if self.apply_root_fix and target_to_src_joint_map[target_root] is None: target_to_src_joint_map[target_root] = self.src_skeleton.root else: self.apply_root_fix = False if scale_factor <= 0: self.auto_scale_factor() def auto_scale_factor(self): """ estimate scale from leg length by gemlongman """ target_hip_h = self.target_skeleton.get_body_hip2foot_height() src_hip_h = self.src_skeleton.get_body_hip2foot_height() self.scale_factor = target_hip_h / src_hip_h print("debug scale_factor :" + str(target_hip_h)+ " / " +str(src_hip_h) + " = " +str(self.scale_factor)) def rotate_bone(self, src_name, target_name, src_frame, target_frame, guess): q = guess src_x_axis = self.src_cos_map[src_name]["x"] src_up_axis = self.src_cos_map[src_name]["y"] if self.src_cos_map[src_name]["y"] is not None and self.target_cos_map[target_name]["y"] is not None: global_m = self.src_skeleton.nodes[src_name].get_global_matrix(src_frame)[:3, :3] global_src_up_vec = normalize(np.dot(global_m, src_up_axis)) global_src_x_vec = normalize(np.dot(global_m, src_x_axis)) apply_spine_fix = self.apply_spine_fix and target_name in self.target_spine_joints q = find_rotation_analytically(self.target_skeleton, target_name, global_src_up_vec, global_src_x_vec, target_frame, self.target_cos_map, apply_spine_fix, self.apply_root_fix) return q def rotate_bone_fast(self, src_name, target_name, src_frame, target_frame, quess): q = quess src_child_name
# -*- coding: utf-8 -*- """ we test .agg behavior / note that .apply is tested generally in test_groupby.py """ from __future__ import print_function import pytest from datetime import datetime, timedelta from functools import partial import numpy as np from numpy import nan import pandas as pd from pandas import (date_range, MultiIndex, DataFrame, Series, Index, bdate_range, concat) from pandas.util.testing import assert_frame_equal, assert_series_equal from pandas.core.groupby import SpecificationError, DataError from pandas.compat import OrderedDict from pandas.io.formats.printing import pprint_thing import pandas.util.testing as tm class TestGroupByAggregate(object): def setup_method(self, method): self.ts = tm.makeTimeSeries() self.seriesd = tm.getSeriesData() self.tsd = tm.getTimeSeriesData() self.frame = DataFrame(self.seriesd) self.tsframe = DataFrame(self.tsd) self.df = DataFrame( {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], 'C': np.random.randn(8), 'D': np.random.randn(8)}) self.df_mixed_floats = DataFrame( {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], 'C': np.random.randn(8), 'D': np.array( np.random.randn(8), dtype='float32')}) index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) self.mframe = DataFrame(np.random.randn(10, 3), index=index, columns=['A', 'B', 'C']) self.three_group = DataFrame( {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar', 'foo', 'foo', 'foo'], 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two', 'two', 'two', 'one'], 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny', 'dull', 'shiny', 'shiny', 'shiny'], 'D': np.random.randn(11), 'E': np.random.randn(11), 'F': np.random.randn(11)}) def test_agg_api(self): # GH 6337 # http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error # different api for agg when passed custom function with mixed frame df = DataFrame({'data1': np.random.randn(5), 'data2': np.random.randn(5), 'key1': ['a', 'a', 'b', 'b', 'a'], 'key2': ['one', 'two', 'one', 'two', 'one']}) grouped = df.groupby('key1') def peak_to_peak(arr): return arr.max() - arr.min() expected = grouped.agg([peak_to_peak]) expected.columns = ['data1', 'data2'] result = grouped.agg(peak_to_peak) assert_frame_equal(result, expected) def test_agg_regression1(self): grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month]) result = grouped.agg(np.mean) expected = grouped.mean() assert_frame_equal(result, expected) def test_agg_datetimes_mixed(self): data = [[1, '2012-01-01', 1.0], [2, '2012-01-02', 2.0], [3, None, 3.0]] df1 = DataFrame({'key': [x[0] for x in data], 'date': [x[1] for x in data], 'value': [x[2] for x in data]}) data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date() if row[1] else None, row[2]] for row in data] df2 = DataFrame({'key': [x[0] for x in data], 'date': [x[1] for x in data], 'value': [x[2] for x in data]}) df1['weights'] = df1['value'] / df1['value'].sum() gb1 = df1.groupby('date').aggregate(np.sum) df2['weights'] = df1['value'] / df1['value'].sum() gb2 = df2.groupby('date').aggregate(np.sum) assert (len(gb1) == len(gb2)) def test_agg_period_index(self): from pandas import period_range, PeriodIndex prng = period_range('2012-1-1', freq='M', periods=3) df = 
DataFrame(np.random.randn(3, 2), index=prng) rs = df.groupby(level=0).sum() assert isinstance(rs.index, PeriodIndex) # GH 3579 index = period_range(start='1999-01', periods=5, freq='M') s1 = Series(np.random.rand(len(index)), index=index) s2 = Series(np.random.rand(len(index)), index=index) series = [('s1', s1), ('s2', s2)] df = DataFrame.from_items(series) grouped = df.groupby(df.index.month) list(grouped) def test_agg_dict_parameter_cast_result_dtypes(self): # GH 12821 df = DataFrame( {'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'], 'time': date_range('1/1/2011', periods=8, freq='H')}) df.loc[[0, 1, 2, 5], 'time'] = None # test for `first` function exp = df.loc[[0, 3, 4, 6]].set_index('class') grouped = df.groupby('class') assert_frame_equal(grouped.first(), exp) assert_frame_equal(grouped.agg('first'), exp) assert_frame_equal(grouped.agg({'time': 'first'}), exp) assert_series_equal(grouped.time.first(), exp['time']) assert_series_equal(grouped.time.agg('first'), exp['time']) # test for `last` function exp = df.loc[[0, 3, 4, 7]].set_index('class') grouped = df.groupby('class') assert_frame_equal(grouped.last(), exp) assert_frame_equal(grouped.agg('last'), exp) assert_frame_equal(grouped.agg({'time': 'last'}), exp) assert_series_equal(grouped.time.last(), exp['time']) assert_series_equal(grouped.time.agg('last'), exp['time']) # count exp = pd.Series([2, 2, 2, 2], index=Index(list('ABCD'), name='class'), name='time') assert_series_equal(grouped.time.agg(len), exp) assert_series_equal(grouped.time.size(), exp) exp = pd.Series([0, 1, 1, 2], index=Index(list('ABCD'), name='class'), name='time') assert_series_equal(grouped.time.count(), exp) def test_agg_cast_results_dtypes(self): # similar to GH12821 # xref #11444 u = [datetime(2015, x + 1, 1) for x in range(12)] v = list('aaabbbbbbccd') df = pd.DataFrame({'X': v, 'Y': u}) result = df.groupby('X')['Y'].agg(len) expected = df.groupby('X')['Y'].count() assert_series_equal(result, expected) def test_agg_must_agg(self): grouped = self.df.groupby('A')['C'] pytest.raises(Exception, grouped.agg, lambda x: x.describe()) pytest.raises(Exception, grouped.agg, lambda x: x.index[:2]) def test_agg_ser_multi_key(self): # TODO(wesm): unused ser = self.df.C # noqa f = lambda x: x.sum() results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f) expected = self.df.groupby(['A', 'B']).sum()['C'] assert_series_equal(results, expected) def test_agg_apply_corner(self): # nothing to group, all NA grouped = self.ts.groupby(self.ts * np.nan) assert self.ts.dtype == np.float64 # groupby float64 values results in Float64Index exp = Series([], dtype=np.float64, index=pd.Index( [], dtype=np.float64)) assert_series_equal(grouped.sum(), exp) assert_series_equal(grouped.agg(np.sum), exp) assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False) # DataFrame grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan) exp_df = DataFrame(columns=self.tsframe.columns, dtype=float, index=pd.Index([], dtype=np.float64)) assert_frame_equal(grouped.sum(), exp_df, check_names=False) assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False) assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0], check_names=False) def test_agg_grouping_is_list_tuple(self): from pandas.core.groupby import Grouping df = tm.makeTimeDataFrame() grouped = df.groupby(lambda x: x.year) grouper = grouped.grouper.groupings[0].grouper grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper)) result = grouped.agg(np.mean) expected = grouped.mean() 
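# --- Illustrative aside (not part of the test module) ----------------------------------
# Compact recap of the aggregation call shapes these tests assert: a single function by
# name, a list of functions, and a per-column dict. Data below is made up.
import numpy as np
import pandas as pd

df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar'],
                   'C': np.random.randn(4),
                   'D': np.arange(4)})
g = df.groupby('A')

g.agg('mean')                       # one function by name -> one value per group/column
g['C'].agg(['sum', 'mean'])         # list of functions -> one output column per function
g.agg({'C': 'mean', 'D': 'sum'})    # per-column dict -> a different function per column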
tm.assert_frame_equal(result, expected) grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper)) result = grouped.agg(np.mean) expected = grouped.mean() tm.assert_frame_equal(result, expected) def test_aggregate_float64_no_int64(self): # see gh-11199 df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]}) expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5]) expected.index.name = "b" result = df.groupby("b")[["a"]].mean() tm.assert_frame_equal(result, expected) expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5]) expected.index.name = "b" result = df.groupby("b")[["a", "c"]].mean() tm.assert_frame_equal(result, expected) def test_aggregate_api_consistency(self): # GH 9052 # make sure that the aggregates via dict # are consistent df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': ['one', 'one', 'two', 'two', 'two', 'two', 'one', 'two'], 'C': np.random.randn(8) + 1.0, 'D': np.arange(8)}) grouped = df.groupby(['A', 'B']) c_mean = grouped['C'].mean() c_sum = grouped['C'].sum() d_mean = grouped['D'].mean() d_sum = grouped['D'].sum() result = grouped['D'].agg(['sum', 'mean']) expected = pd.concat([d_sum, d_mean], axis=1) expected.columns = ['sum', 'mean'] assert_frame_equal(result, expected, check_like=True) result = grouped.agg([np.sum, np.mean]) expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1) expected.columns = MultiIndex.from_product([['C', 'D'], ['sum', 'mean']]) assert_frame_equal(result, expected, check_like=True) result = grouped[['D', 'C']].agg([np.sum, np.mean]) expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1) expected.columns = MultiIndex.from_product([['D', 'C'], ['sum', 'mean']]) assert_frame_equal(result, expected, check_like=True) result = grouped.agg({'C': 'mean', 'D': 'sum'}) expected = pd.concat([d_sum, c_mean], axis=1) assert_frame_equal(result, expected, check_like=True) result = grouped.agg({'C': ['mean', 'sum'], 'D': ['mean', 'sum']}) expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1) expected.columns = MultiIndex.from_product([['C', 'D'], ['mean', 'sum']]) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = grouped[['D', 'C']].agg({'r': np.sum, 'r2': np.mean}) expected = pd.concat([d_sum, c_sum, d_mean, c_mean], axis=1) expected.columns = MultiIndex.from_product([['r', 'r2'], ['D', 'C']]) assert_frame_equal(result, expected, check_like=True) def test_agg_dict_renaming_deprecation(self): # 15931 df = pd.DataFrame({'A': [1, 1, 1, 2, 2], 'B': range(5), 'C': range(5)}) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False) as w: df.groupby('A').agg({'B': {'foo': ['sum', 'max']}, 'C': {'bar': ['count', 'min']}}) assert "using a dict with renaming" in str(w[0].message) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): df.groupby('A')[['B', 'C']].agg({'ma': 'max'}) with tm.assert_produces_warning(FutureWarning) as w: df.groupby('A').B.agg({'foo': 'count'}) assert "using a dict on a Series for aggregation" in str( w[0].message) def test_agg_compat(self): # GH 12334 df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': ['one', 'one', 'two', 'two', 'two', 'two', 'one', 'two'], 'C': np.random.randn(8) + 1.0, 'D': np.arange(8)}) g = df.groupby(['A', 'B']) expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1) expected.columns = MultiIndex.from_tuples([('C', 'sum'), ('C', 'std')]) with tm.assert_produces_warning(FutureWarning, 
check_stacklevel=False): result = g['D'].agg({'C': ['sum', 'std']}) assert_frame_equal(result, expected, check_like=True) expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1) expected.columns = ['C', 'D'] with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = g['D'].agg({'C': 'sum', 'D': 'std'}) assert_frame_equal(result, expected, check_like=True) def test_agg_nested_dicts(self): # API change for disallowing these types of nested dicts df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': ['one', 'one', 'two', 'two', 'two', 'two', 'one', 'two'], 'C': np.random.randn(8) + 1.0, 'D': np.arange(8)}) g = df.groupby(['A', 'B']) def f(): g.aggregate({'r1': {'C': ['mean', 'sum']}, 'r2': {'D': ['mean', 'sum']}}) pytest.raises(SpecificationError, f) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = g.agg({'C': {'ra': ['mean', 'std']}, 'D': {'rb': ['mean', 'std']}}) expected = pd.concat([g['C'].mean(), g['C'].std(), g['D'].mean(), g['D'].std()], axis=1) expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), ( 'ra', 'std'), ('rb', 'mean'), ('rb', 'std')]) assert_frame_equal(result, expected, check_like=True) # same name as the original column # GH9052 with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): expected = g['D'].agg({'result1': np.sum, 'result2': np.mean}) expected = expected.rename(columns={'result1': 'D'}) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = g['D'].agg({'D': np.sum, 'result2': np.mean}) assert_frame_equal(result, expected, check_like=True) def test_agg_python_multiindex(self): grouped = self.mframe.groupby(['A', 'B']) result = grouped.agg(np.mean) expected = grouped.mean() tm.assert_frame_equal(result, expected) def test_aggregate_str_func(self): def _check_results(grouped): # single series result = grouped['A'].agg('std') expected = grouped['A'].std() assert_series_equal(result, expected) # group frame by function name result = grouped.aggregate('var') expected = grouped.var() assert_frame_equal(result, expected) # group frame by function dict result = grouped.agg(OrderedDict([['A', 'var'], ['B', 'std'], ['C', 'mean'], ['D', 'sem']]))
<filename>src/puliclient/jobs.py ''' Created on Jan 11, 2010 @author: <NAME> ''' import sys import site import traceback import logging import subprocess try: import simplejson as json except ImportError: import json # # Errors specific to command execution # class TimeoutError (Exception): ''' Raised when helper execution is too long. ''' class CommandDone(Exception): '''Raised to manually end a command execution.''' class CommandError(Exception): '''Raised to signal failure of a CommandRunner execution.''' class ValidationError(CommandError): '''Raised on a validation error''' class RangeError(CommandError): '''Raised on a validation error where a value given is out of authorized range''' class JobTypeImportError(ImportError): '''Raised when an error occurs while loading a job type through the load function''' class CommandRunnerParameter(object): '''Base class for formal command runner parameter.''' name = None isMandatory = False def __init__(self, default=None, mandatory=None, **kwargs): if default is not None: self.hasDefault = True self.defaultValue = default else: self.hasDefault = False self.defaultValue = None if mandatory is not None: self.isMandatory = True # Set range values if given in args, still need to be validated against value # TODO specialize it in typed parameter classes if 'min' in kwargs: self.minValue = kwargs['min'] if 'max' in kwargs: self.maxValue = kwargs['max'] def validate(self, arguments): if not self.name in arguments and self.isMandatory: raise ValidationError("Mandatory argument \"%s\" is not defined in command arguments" % self.name) if not self.name in arguments and self.hasDefault: arguments[self.name] = self.defaultValue def __repr__(self): return "CommandRunnerParameter(name=%r, default=%r, mandatory=%r)" % (self.name, self.defaultValue, self.isMandatory) def __str__(self): return "%r (default=%r, mandatory=%r)" % (self.name, self.defaultValue, self.isMandatory) class StringParameter(CommandRunnerParameter): '''A command runner parameter class that converts the argument value to a string.''' def validate(self, arguments): try: super(StringParameter, self).validate(arguments) if self.name in arguments: arguments[self.name] = str(arguments[self.name]) except Exception, e: raise e class StringListParameter(CommandRunnerParameter): def validate(self, arguments): try: super(StringListParameter, self).validate(arguments) arguments[self.name] = [str(v) for v in arguments[self.name]] except Exception, e: raise e class BooleanParameter(CommandRunnerParameter): def validate(self, arguments): try: super(BooleanParameter, self).validate(arguments) arguments[self.name] = bool(arguments[self.name]) except Exception, e: raise e class IntegerParameter(CommandRunnerParameter): '''A command runner parameter class that converts the argument value to an integer value.''' minValue = None maxValue = None def validate(self, arguments): try: # Base class will check if argument is present or ensure it has its default value super(IntegerParameter, self).validate(arguments) if arguments[self.name]: newVal = int(arguments[self.name]) # Validate range if defined if self.minValue is not None and newVal < self.minValue: raise RangeError("Argument \"%s\"=%d is less than minimum: %d" % ( self.name, newVal, self.minValue)) if self.maxValue is not None and self.maxValue < newVal: raise RangeError("Argument \"%s\"=%d is more than maximum: %d" % ( self.name, self.maxValue, newVal)) arguments[self.name] = newVal except RangeError, e: raise e except ValidationError, e: raise e def 
__repr__(self): return "IntParameter(name=%r, default=%r, mandatory=%r, minValue=%r, maxValue=%r )" % ( self.name, self.defaultValue, self.isMandatory, self.minValue, self.maxValue, ) def __str__(self): return "%r (default=%r, mandatory=%r, range=[%r,%r])" % (self.name, self.defaultValue, self.isMandatory, self.minValue, self.maxValue) class FloatParameter(CommandRunnerParameter): '''A command runner parameter class that converts the argument value to an float value.''' minValue = None maxValue = None def validate(self, arguments): try: super(FloatParameter, self).validate(arguments) if arguments[self.name]: newVal = float(arguments[self.name]) # Validate range if defined if self.minValue is not None and newVal < self.minValue: raise RangeError("Argument \"%s\"=%d is less than minimum: %d" % ( self.name, newVal, self.minValue)) if self.maxValue is not None and self.maxValue < newVal: raise RangeError("Argument \"%s\"=%d is more than maximum: %d" % ( self.name, self.maxValue, newVal)) arguments[self.name] = newVal except Exception, e: raise e def __repr__(self): return "FloatParameter(name=%r, default=%r, mandatory=%r, minValue=%r, maxValue=%r )" % ( self.name, self.defaultValue, self.isMandatory, self.minValue, self.maxValue, ) def __str__(self): return "%r (default=%r, mandatory=%r, range=[%r,%r])" % (self.name, self.defaultValue, self.isMandatory, self.minValue, self.maxValue) class CommandRunnerMetaclass(type): def __init__(self, name, bases, attributes): type.__init__(self, name, bases, attributes) parameters = attributes.get('parameters', []) for base in bases: if isinstance(base, CommandRunnerMetaclass): parameters += base.parameters for (name, arg) in attributes.iteritems(): if isinstance(arg, CommandRunnerParameter): arg.name = name parameters.append(arg) self.parameters = parameters # logging.getLogger('puli.commandwatcher').info('init CommandRunnerMetaclass') class CommandRunner(object): __metaclass__ = CommandRunnerMetaclass log = logging.getLogger('puli.runner') scriptTimeOut = None parameters = [] def execute(self, arguments, updateCompletion, updateMessage, updateStats, updateLicense): raise NotImplementedError def validate(self, arguments): logger = logging.getLogger('puli.commandwatcher') if len(self.parameters) > 0: logger.info("Validating %d parameter(s):" % len(self.parameters)) for parameter in self.parameters: logger.info(" - %s" % parameter) parameter.validate(arguments) # TOFIX no need for scripttimeOut, impossible to kill a thread manually, timeout handling # should be done outside the command runner or around subprocess call in the runner # # Checking global argument scriptTimeOut: # try: # self.scriptTimeOut = int(arguments['scriptTimeOut']) # logger.info("Defining time out limit: scriptTimeout=%d" % self.scriptTimeOut) # except KeyError, e: # logger.info("No scriptTimeout in arguments. Command will never be interrupted (msg: %s)" % e) # except TypeError, e: # logger.info("Invalid scriptTimeout value given (integer expected) (msg: %s)" % e) class DefaultCommandRunner(CommandRunner): cmd = StringParameter(mandatory=True) start = IntegerParameter(default=1) end = IntegerParameter(default=1) timeout = IntegerParameter(default=0, min=0) def execute(self, arguments, updateCompletion, updateMessage, updateStats, updateLicense): ''' | Simple execution using the helper. Default argument "cmd" is expected (mandatory) | to start the execution with the current env. | | If a command is defined on a range, the cmd will be executed several time using start/end arguments. 
| The command can use several standard replacement values: | %%MI_FRAME%% -> replaced by the current frame value | %%MI_START%% -> replaced by the index of the first frame of the range | %%MI_END%% -> replaced by the index of the last frame of the range | | For instance if a command is defined like this: | - start = "10" | - end = "15" | - cmd = "nuke -x -F %%MI_FRAME%% ma_comp.nk" | or cmd = "nuke -x -F %%MI_START%%-%%MI_END%% ma_comp.nk" | | The runner will produce the following execution: | nuke -x -F 10 ma_comp.nk | nuke -x -F 11 ma_comp.nk | nuke -x -F 12 ma_comp.nk | nuke -x -F 13 ma_comp.nk | nuke -x -F 14 ma_comp.nk | nuke -x -F 15 ma_comp.nk ''' cmd = arguments.get('cmd') start = arguments.get('start') end = arguments.get('end') timeout = arguments.get('timeout', None) updateCompletion(0) completion = 0.0 completionIncrement = 1.0 / float((int(end) + 1) - int(start)) for frame in range(start, end + 1): self.log.info("==== Frame %d ====" % frame) currCommand = cmd.replace("%%MI_FRAME%%", str(frame)) currCommand = currCommand.replace("%%MI_START%%", str(start)) currCommand = currCommand.replace("%%MI_END%%", str(end)) self.log.info("Command: %s" % currCommand) subprocess.check_call(currCommand, close_fds=True, shell=True) completion += completionIncrement updateCompletion(completion) self.log.info("Updating completion %f " % completion) updateCompletion(1) class TaskExpander(object): def __init__(self, taskGroup): pass class TaskDecomposer(object): """ | Base class for Decomposer hierarchy. | Implements a minimalist "addCommand" method. """ def __init__(self, task): self.task = task def addCommand(self, name, args, runnerPackages=None, watcherPackages=None): self.task.addCommand(name, args, runnerPackages, watcherPackages) class DefaultTaskDecomposer(TaskDecomposer): """ | Default decomposesr called when no decomposer given for a task. It will use the PuliActionHelper to create one | or several commands on a task. PuliActionHelper's decompose method will have the following behaviour: | - if "framesList" is defined: | create a command for each frame indicated (frameList is a string with frame numbers separated by spaces) | - else: | try to use start/end/packetSize attributes to create several commands (frames grouped by packetSize) | | If no "arguments" dict is given, print a warning and create a single command with empty arguments. """ # DEFAULT FIELDS USED TO DECOMPOSE A TASK START_LABEL = "start" END_LABEL = "end" PACKETSIZE_LABEL = "packetSize" FRAMESLIST_LABEL = "framesList" def __init__(self, task): """ :type task: object """ super(DefaultTaskDecomposer, self).__init__(task) if task.arguments is None: # Create an empty command anyway --> probably unecessary print "WARNING: No arguments given for the task \"%s\", it is necessary to do this ? (we are creating an empty command anyway..." 
% task.name self.task.addCommand(task.name + "_1_1", {}) elif all(key in task.arguments for key in (self.START_LABEL, self.END_LABEL)) \ or self.FRAMESLIST_LABEL in task.arguments: # if standard attributes exist in arguments, use the PuliHelper to decompose accordingly start = task.arguments.get(self.START_LABEL, 1) end = task.arguments.get(self.END_LABEL, 1) packetSize = task.arguments.get(self.PACKETSIZE_LABEL, 1) framesList = task.arguments.get(self.FRAMESLIST_LABEL, "") self.decompose(start=start, end=end, packetSize=packetSize, callback=self, framesList=framesList) else: # If arguments given but no standard behaviour, simply transmit task arguments to single command self.task.addCommand(task.name, task.arguments) def addCommand(self, packetStart, packetEnd): ''' Default method to add a command with DefaultTaskDecomposer. :param packetStart: Integer representing the first frame :param packetEnd: Integer representing the last frame ''' cmdArgs
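Editorial note on the chunk above: the DefaultCommandRunner docstring describes how %%MI_FRAME%%, %%MI_START%% and %%MI_END%% are substituted once per frame of the range. A standalone Python 3 sketch of that expansion (expand_commands is a hypothetical helper, not part of the module):

# Standalone sketch of the token expansion described in DefaultCommandRunner:
# one command line is produced per frame, with the placeholders replaced.
def expand_commands(cmd, start, end):
    commands = []
    for frame in range(start, end + 1):
        line = cmd.replace("%%MI_FRAME%%", str(frame))
        line = line.replace("%%MI_START%%", str(start))
        line = line.replace("%%MI_END%%", str(end))
        commands.append(line)
    return commands

# Reproduces the docstring example: nuke -x -F 10 .. 15 ma_comp.nk
for line in expand_commands("nuke -x -F %%MI_FRAME%% ma_comp.nk", 10, 15):
    print(line)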
import numpy as np import pandas as pd import statsmodels.api as sm import statsmodels.formula.api as smf from stargazer.stargazer import Stargazer from IPython.core.display import HTML from IPython.core.interactiveshell import InteractiveShell from statsmodels.sandbox.regression.gmm import IV2SLS def create_table1(df): #Ethnicity table df_1 = df.sort_values(by="ethnicity_C2", ascending=False) df_1 = df_1[['country', 'ethnicity_C2', 'ethnicity_I']].head() df_1 = df_1.reset_index(drop=True) df_2 = df.sort_values(by="ethnicity_C2", ascending=True) df_2 = df_2[['country', 'ethnicity_C2', 'ethnicity_I']].head() df_2 = df_2.reset_index(drop=True) df_eth = pd.concat([df_1,df_2], axis = 1) df_eth = df_eth.round(decimals=3) #Language table df_3 = df.sort_values(by="language_C2", ascending=False) df_3 = df_3[['country', 'language_C2', 'language_I']].head() df_3 = df_3.reset_index(drop=True) df_4 = df.sort_values(by="language_C2", ascending=True) df_4 = df_4[['country', 'language_C2', 'language_I']].head() df_4 = df_4.reset_index(drop=True) df_lang = pd.concat([df_3,df_4], axis = 1) df_lang = df_lang.round(decimals=3) #Religion table df_5 = df.sort_values(by="religion_C2", ascending=False) df_5 = df_5[['country', 'religion_C2', 'religion_I']].head() df_5 = df_5.reset_index(drop=True) df_6 = df.sort_values(by="religion_C2", ascending=True) df_6 = df_6[['country', 'religion_C2', 'religion_I']].head() df_6 = df_6.reset_index(drop=True) df_rel = pd.concat([df_5,df_6], axis = 1) df_rel = df_rel.round(decimals=3) #Rename rows and columns of complete table table1 = pd.concat([df_eth,df_lang,df_rel],axis = 1) table1.columns = pd.MultiIndex.from_product([['Ethnicity','Language','Religion'], ['Most segregated','Least segregated',], ['Country','$\hat{S}$', '$F$']]) return table1 def create_table2(df): variables = df[['ethnicity_C2','language_C2','religion_C2','ethnicity_C','language_C','religion_C', 'voice','PolStab','GovEffec','RegQual','RulLaw','ConCorr']] table2 = pd.DataFrame() table2 = variables.corr() table2.drop(['voice','PolStab','GovEffec','RegQual','RulLaw','ConCorr'], axis = 1, inplace = True) table2.drop(['ethnicity_C2','ethnicity_C','language_C2','language_C','religion_C2','religion_C'], axis = 0, inplace = True) table2 = table2.round(decimals=2) table2.columns = pd.MultiIndex.from_product([['Segregation indices'], ['Ethnicity $\hat{S}$','Language $\hat{S}$', 'Religion $\hat{S}$', 'Ethnicity $\tilde{S}$','Language $\tilde{S}$', 'Religion $\tilde{S}$']] ) table2 = table2.rename(index = { "voice" : 'Voice', "PolStab" : 'Political stability', "GovEffec" : 'Government effectiveness', "RegQual" : 'Regulatory quality', "RulLaw" : 'Rule of law', "ConCorr" : 'Control of corruption'} ) return table2 def table3_7(df, regression_type) : df_3_7E = df[['ethnicity_C2','ethnicity_instrument_C2_thresh','ethnicity_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist','lnArea', 'LOScandin','democ','mtnall','RulLaw']].dropna(axis=0) df_3_7L = df[['language_C2','language_instrument_C2_thresh','language_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist','lnArea', 'LOScandin','democ','mtnall','RulLaw']].dropna(axis=0) df_3_7R = df[['religion_C2','religion_instrument_C2_thresh','religion_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist','lnArea', 'LOScandin','democ','mtnall','RulLaw']].dropna(axis=0) exo =
sm.add_constant(df_3_7E[['ethnicity_C2','ethnicity_I','lnpopulation','lnGDP_pc','protestants','muslims', 'catholics','latitude','LOEnglish','LOGerman','LOSocialist', 'LOScandin','lnArea', 'democ','mtnall']]) exo2 = sm.add_constant(df_3_7E[['ethnicity_C2','ethnicity_I']]) exo3 = sm.add_constant(df_3_7L[['language_C2','language_I','lnpopulation','lnGDP_pc','protestants','lnArea', 'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist', 'LOScandin', 'democ','mtnall']]) exo4 = sm.add_constant(df_3_7L[['language_C2','language_I']]) exo5 = sm.add_constant(df_3_7R[['religion_C2','religion_I','lnpopulation','lnGDP_pc','protestants', 'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist','lnArea', 'democ','mtnall']]) exo6 = sm.add_constant(df_3_7R[['religion_C2','religion_I']]) if regression_type == 'IV2SLS' : reg = IV2SLS(df_3_7E['RulLaw'], exo, sm.add_constant(df_3_7E[['ethnicity_instrument_C2_thresh','ethnicity_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist', 'LOScandin','democ','mtnall','lnArea']])).fit() reg2 = IV2SLS(df_3_7E['RulLaw'], exo2, sm.add_constant(df_3_7E[['ethnicity_instrument_C2_thresh', 'ethnicity_I']])).fit() reg3 = IV2SLS(df_3_7L['RulLaw'], exo3, sm.add_constant(df_3_7L[['language_instrument_C2_thresh','language_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist', 'LOScandin','democ','mtnall','lnArea']])).fit() reg4 = IV2SLS(df_3_7L['RulLaw'], exo4, sm.add_constant(df_3_7L[['language_instrument_C2_thresh', 'language_I']])).fit() reg5 = IV2SLS(df_3_7R['RulLaw'], exo5, sm.add_constant(df_3_7R[['religion_instrument_C2_thresh','religion_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','democ','mtnall','lnArea']])).fit() reg6 = IV2SLS(df_3_7R['RulLaw'], exo6, sm.add_constant(df_3_7R[['religion_instrument_C2_thresh', 'religion_I']])).fit() elif regression_type == 'OLS' : reg2 = sm.OLS(df_3_7E['RulLaw'], exo2).fit(cov_type = 'HC1') reg = sm.OLS(df_3_7E['RulLaw'], exo).fit(cov_type = 'HC1') reg4 = sm.OLS(df_3_7L['RulLaw'], exo4).fit(cov_type = 'HC1') reg3 = sm.OLS(df_3_7L['RulLaw'], exo3).fit(cov_type = 'HC1') reg6 = sm.OLS(df_3_7R['RulLaw'], exo6).fit(cov_type = 'HC1') reg5 = sm.OLS(df_3_7R['RulLaw'], exo5).fit(cov_type = 'HC1') stargazer = Stargazer([reg2, reg, reg4, reg3, reg6, reg5]) stargazer.covariate_order(['ethnicity_C2', 'ethnicity_I','language_C2','language_I','religion_C2','religion_I', 'lnpopulation','lnGDP_pc','lnArea','protestants','muslims','catholics', 'latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ','mtnall','const']) stargazer.rename_covariates({'ethnicity_C2' : 'Segregation $\hat{S}$ (ethnicity)', 'ethnicity_I' : 'Fractionalization $F$ (ethnicity)', 'language_C2' : 'Segregation $\hat{S}$ (language)', 'language_I' : 'Fractionalization $F$ (language)', 'religion_C2' : 'Segregation $\hat{S}$ (religion)', 'religion_I' : 'Fractionalization $F$ (religion)', 'lnpopulation' : 'ln (population)', 'lnGDP_pc' : 'ln (GDP per capita)', 'lnArea' : 'ln (average size of region)', 'protestants' : 'Pretestants share', 'muslims' : 'Muslmis Share', 'catholics' : 'Catholics share', 'latitude' : 'Latitude', 'LOEnglish' : 'English legal origin', 'LOGerman' : 'German legal origin', 'LOSocialist' : 'Socialist legal origin', 'LOScandin' : 'Scandinavian legal origin', 'democ' : 'Democratic tradition', 'mtnall' : 'Mountains', 'const' : 'Constant'}) return 
HTML(stargazer.render_html()) def table4_5(df,name) : df_table4A = df[[f'{name}_C2',f'{name}_I','lnpopulation','lnGDP_pc','protestants','muslims', 'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ', 'mtnall','voice','PolStab','GovEffec','RegQual','ConCorr','RulLaw']].dropna(axis = 0) df_table4B = df_table4A[[f'{name}_C2',f'{name}_I','voice','PolStab','GovEffec','RegQual','ConCorr','RulLaw']] df_table4C = df_table4A[df_table4A.democ > 1] xA = sm.add_constant(df_table4A[[f'{name}_C2',f'{name}_I','lnpopulation','lnGDP_pc','protestants', 'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist', 'LOScandin','democ','mtnall']]) xB = sm.add_constant(df_table4B[[f'{name}_C2', f'{name}_I']]) xC = sm.add_constant(df_table4C[[f'{name}_C2',f'{name}_I','lnpopulation','lnGDP_pc','protestants', 'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist', 'LOScandin','democ','mtnall']]) df_table4s = [df_table4A, df_table4B, df_table4C] xs = [xA, xB, xC] y = [ [f'y{idx}A', f'y{idx}B', f'y{idx}C'] for idx in range(1, 7) ] est = [ [f'est{idx}A', f'est{idx}B', f'est{idx}C'] for idx in range(1, 7) ] star = ['starA','starB','starC'] for idx, i in enumerate(['A','B','C']) : y[0][idx] = df_table4s[idx]['voice'] y[1][idx] = df_table4s[idx]['PolStab'] y[2][idx] = df_table4s[idx]['GovEffec'] y[3][idx] = df_table4s[idx]['RegQual'] y[4][idx] = df_table4s[idx]['RulLaw'] y[5][idx] = df_table4s[idx]['ConCorr'] est[0][idx] = sm.OLS(y[0][idx], xs[idx]).fit(cov_type = 'HC1') est[1][idx] = sm.OLS(y[1][idx], xs[idx]).fit(cov_type = 'HC1') est[2][idx] = sm.OLS(y[2][idx], xs[idx]).fit(cov_type = 'HC1') est[3][idx] = sm.OLS(y[3][idx], xs[idx]).fit(cov_type = 'HC1') est[4][idx] = sm.OLS(y[4][idx], xs[idx]).fit(cov_type = 'HC1') est[5][idx] = sm.OLS(y[5][idx], xs[idx]).fit(cov_type = 'HC1') star[idx] = Stargazer([est[0][idx],est[1][idx],est[2][idx],est[3][idx],est[4][idx],est[5][idx]]) for i in range(3) : star[i].covariate_order([f'{name}_C2',f'{name}_I']) star[i].rename_covariates({f'{name}_C2' : 'Segregation $\hat{S}$ ('f'{name}'')', f'{name}_I' : 'Fractionalization $F$ ('f'{name}'')'}) star[i].show_model_numbers(False) star[i].custom_columns(['Voice', 'Political stability', 'Govern-t effectiv.', 'Regul. quality', 'Rule of law', 'Control of corr'], [1,1,1,1,1,1]) star[0].add_line('Controls', ['Yes','Yes','Yes','Yes','Yes','Yes']) star[0].add_line('Sample', ['Full','Full','Full','Full','Full','Full']) star[1].add_line('Controls', ['No','No','No','No','No','No']) star[1].add_line('Sample', ['Full','Full','Full','Full','Full','Full']) star[2].add_line('Controls', ['Yes','Yes','Yes','Yes','Yes','Yes']) star[2].add_line('Sample', ['Democ','Democ','Democ','Democ','Democ','Democ']) star[0].title('Panel A. Baseline : All controls and full sample') star[1].title('Panel B. No controls and full sample') star[2].title('Panel C. 
All controls; sample excludes dictatorship') return [star[0],star[1],star[2]] def table6(df, alternative = True) : df_6E = df[['ethnicity_C2', 'ethnicity_I','ethnicity_C','ethnicity_instrument_C_thresh', 'ethnicity_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims', 'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ', 'mtnall','RulLaw','country']].dropna(axis=0) df_6L = df[['language_C2', 'language_I','language_C','language_instrument_C_thresh', 'language_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims', 'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ', 'mtnall','RulLaw','country']].dropna(axis=0) df_6R = df[['religion_C2', 'religion_I','religion_C','religion_instrument_C_thresh', 'religion_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims', 'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ', 'mtnall','RulLaw','country']].dropna(axis=0) df_6E_demo = df_6E[df_6E.democ >= 1] df_6L_demo = df_6L[df_6L.democ >= 1] df_6R_demo = df_6R[df_6R.democ >= 1] x1 = sm.add_constant(df_6E[['ethnicity_instrument_C2_thresh', 'ethnicity_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish', 'LOGerman','LOSocialist','LOScandin','democ','mtnall']]) x2 = sm.add_constant(df_6L[['language_instrument_C2_thresh', 'language_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','LOScandin','democ','mtnall']]) x3 = sm.add_constant(df_6R[['religion_instrument_C2_thresh', 'religion_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','democ','mtnall']]) x4 = sm.add_constant(df_6E_demo[['ethnicity_instrument_C2_thresh', 'ethnicity_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','LOScandin','democ','mtnall']]) x5 = sm.add_constant(df_6L_demo[['language_instrument_C2_thresh', 'language_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','LOScandin','democ','mtnall']]) x6 = sm.add_constant(df_6R_demo[['religion_instrument_C2_thresh', 'religion_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','democ','mtnall']]) y1 = df_6E['ethnicity_C2'] y2 = df_6L['language_C2'] y3 = df_6R['religion_C2'] y4 = df_6E_demo['ethnicity_C2'] y5 = df_6L_demo['language_C2'] y6 = df_6R_demo['religion_C2'] est1 = sm.OLS(y1, x1).fit(cov_type = 'HC1') est2 = sm.OLS(y2, x2).fit(cov_type = 'HC1') est3 = sm.OLS(y3, x3).fit(cov_type = 'HC1') est4 = sm.OLS(y4, x4).fit(cov_type = 'HC1') est5 = sm.OLS(y5, x5).fit(cov_type = 'HC1') est6 = sm.OLS(y6, x6).fit(cov_type = 'HC1') x1a = sm.add_constant(df_6E[['ethnicity_instrument_C_thresh', 'ethnicity_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','LOScandin','democ','mtnall']]) x2a = sm.add_constant(df_6L[['language_instrument_C_thresh', 'language_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','LOScandin','democ','mtnall']]) x3a = sm.add_constant(df_6R[['religion_instrument_C_thresh', 'religion_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','democ','mtnall']]) x4a = 
sm.add_constant(df_6E_demo[['ethnicity_instrument_C_thresh', 'ethnicity_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','LOScandin','democ','mtnall']]) x5a = sm.add_constant(df_6L_demo[['language_instrument_C_thresh', 'language_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','LOScandin','democ','mtnall']]) x6a = sm.add_constant(df_6R_demo[['religion_instrument_C_thresh', 'religion_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','democ','mtnall']]) y1a = df_6E['ethnicity_C'] y2a = df_6L['language_C'] y3a = df_6R['religion_C'] y4a = df_6E_demo['ethnicity_C'] y5a = df_6L_demo['language_C'] y6a = df_6R_demo['religion_C'] est1a = sm.OLS(y1a, x1a).fit(cov_type = 'HC1') est2a = sm.OLS(y2a, x2a).fit(cov_type = 'HC1') est3a = sm.OLS(y3a, x3a).fit(cov_type = 'HC1') est4a = sm.OLS(y4a, x4a).fit(cov_type = 'HC1') est5a = sm.OLS(y5a, x5a).fit(cov_type = 'HC1') est6a = sm.OLS(y6a, x6a).fit(cov_type = 'HC1') df_6Lb = df_6L.set_index('country') df_6Lb_demo = df_6L_demo.set_index('country') x2b = sm.add_constant(df_6Lb[['language_instrument_C_thresh', 'language_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','LOScandin','democ','mtnall']].drop(index = 'usa')) x5b = sm.add_constant(df_6Lb_demo[['language_instrument_C_thresh', 'language_I','lnpopulation','lnGDP_pc', 'protestants','muslims','catholics','latitude','LOEnglish','LOGerman', 'LOSocialist','LOScandin','democ','mtnall']].drop(index = 'usa')) y2b = df_6Lb['language_C'].drop(index = 'usa') y5b = df_6Lb_demo['language_C'].drop(index = 'usa') est2b = sm.OLS(y2b, x2b).fit(cov_type = 'HC1') est5b = sm.OLS(y5b, x5b).fit(cov_type = 'HC1') stargazer = Stargazer([est1, est2, est3, est4, est5, est6]) stargazer_a = Stargazer([est1a, est2a, est3a, est4a, est5a, est6a]) stargazer_b = Stargazer([est2b, est5b]) stargazer.covariate_order(['ethnicity_instrument_C2_thresh', 'ethnicity_I', 'language_instrument_C2_thresh', 'language_I', 'religion_instrument_C2_thresh', 'religion_I']) stargazer.rename_covariates({'ethnicity_instrument_C2_thresh':'Instrument E', 'ethnicity_I':'$F$ (ethnicity)', 'language_instrument_C2_thresh':'Instrument L', 'language_I':'$F$ (language)', 'religion_instrument_C2_thresh':'Instrument R', 'religion_I':'$F$ (religion)' }) stargazer.custom_columns(['E$\hat{S}$', 'L$\hat{S}$', 'R$\hat{S}$', 'E$\hat{S}$', 'L$\hat{S}$', 'R$\hat{S}$'], [1,1,1,1,1,1]) stargazer.show_model_numbers(False) stargazer.add_line('Sample', ['Full','Full','Full','Democracy','Democracy','Democracy']) stargazer.title('Panel A. Segregation index $\hat{S}$') stargazer_a.covariate_order(['ethnicity_instrument_C_thresh', 'ethnicity_I', 'language_instrument_C_thresh', 'language_I', 'religion_instrument_C_thresh', 'religion_I']) stargazer_a.rename_covariates({'ethnicity_instrument_C_thresh':'Instrument E', 'ethnicity_I':'$F$ (ethnicity)', 'language_instrument_C_thresh':'Instrument L', 'language_I':'$F$ (language)', 'religion_instrument_C_thresh':'Instrument R', 'religion_I':'$F$ (religion)' }) stargazer_a.custom_columns(['E$\\tilde{S}$', 'L$\\tilde{S}$', 'R$\\tilde{S}$', 'E$\\tilde{S}$', 'L$\\tilde{S}$', 'R$\\tilde{S}$'], [1,1,1,1,1,1]) stargazer_a.show_model_numbers(False) stargazer_a.add_line('Sample', ['Full','Full','Full','Democracy','Democracy','Democracy']) stargazer_a.title('Panel B. 
Segregation index $\\tilde{S}$') stargazer_b.covariate_order(['language_instrument_C_thresh', 'language_I']) stargazer_b.rename_covariates({'language_instrument_C_thresh':'Instrument L', 'language_I':'$F$ (language)' }) stargazer_b.custom_columns(['L$\\tilde{S}$', 'L$\\tilde{S}$'], [1,1]) stargazer_b.show_model_numbers(False) stargazer_b.add_line('Sample', ['Full','Democracy']) stargazer_b.title('Panel C. Segregation index $\\tilde{S}$ for language with sample excluding the US') return [stargazer,stargazer_a,stargazer_b] def table8_9_ext7(df,name,GDP) : df_8_9A = df[[f'{name}_C2',f'{name}_I',f'{name}_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims', 'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ', 'mtnall','voice','PolStab','GovEffec','RegQual','ConCorr','RulLaw' ]].dropna(axis = 0) df_8_9B = df_8_9A[[f'{name}_C2',f'{name}_instrument_C2_thresh',f'{name}_I','voice','PolStab','GovEffec','RegQual', 'ConCorr','RulLaw']] if GDP == 'democ': df_8_9C = df_8_9A[df_8_9A.democ >= 1] elif GDP == 'GDP': df_8_9C = df_8_9A[df_8_9A.lnGDP_pc >= 7] exoA = sm.add_constant(df_8_9A[[f'{name}_C2',f'{name}_I','lnpopulation','lnGDP_pc','protestants', 'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist', 'LOScandin','democ','mtnall']]) exoB = sm.add_constant(df_8_9B[[f'{name}_C2', f'{name}_I']]) exoC = sm.add_constant(df_8_9C[[f'{name}_C2',f'{name}_I','lnpopulation','lnGDP_pc','protestants', 'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist', 'LOScandin','democ','mtnall']]) insA = sm.add_constant(df_8_9A[[f'{name}_instrument_C2_thresh',f'{name}_I','lnpopulation','lnGDP_pc','protestants', 'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist', 'LOScandin','democ','mtnall']]) insB = sm.add_constant(df_8_9B[[f'{name}_instrument_C2_thresh', f'{name}_I']]) insC = sm.add_constant(df_8_9C[[f'{name}_instrument_C2_thresh',f'{name}_I','lnpopulation','lnGDP_pc','protestants', 'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist', 'LOScandin','democ','mtnall']]) df_8_9s = [df_8_9A, df_8_9B, df_8_9C] exos = [exoA, exoB, exoC] inss = [insA, insB, insC] y = [ [f'y{idx}A', f'y{idx}B', f'y{idx}C'] for idx in range(1, 7) ] est = [ [f'est{idx}A', f'est{idx}B', f'est{idx}C'] for idx in range(1, 7) ] star = ['starA','starB','starC'] for idx, i in enumerate(['A','B','C']) : y[0][idx] = df_8_9s[idx]['voice'] y[1][idx] = df_8_9s[idx]['PolStab'] y[2][idx] = df_8_9s[idx]['GovEffec'] y[3][idx] = df_8_9s[idx]['RegQual'] y[4][idx] = df_8_9s[idx]['RulLaw'] y[5][idx] = df_8_9s[idx]['ConCorr'] est[0][idx] = IV2SLS(y[0][idx], exos[idx], inss[idx]).fit() est[1][idx] = IV2SLS(y[1][idx], exos[idx], inss[idx]).fit() est[2][idx] = IV2SLS(y[2][idx], exos[idx], inss[idx]).fit() est[3][idx] = IV2SLS(y[3][idx], exos[idx], inss[idx]).fit() est[4][idx] = IV2SLS(y[4][idx], exos[idx], inss[idx]).fit() est[5][idx] = IV2SLS(y[5][idx], exos[idx], inss[idx]).fit() star[idx] = Stargazer([est[0][idx],est[1][idx],est[2][idx],est[3][idx],est[4][idx],est[5][idx]]) for i in range(3) : star[i].covariate_order([f'{name}_C2',f'{name}_I']) star[i].rename_covariates({f'{name}_C2' : 'Segregation $\hat{S}$ ('f'{name}'')', f'{name}_I' : 'Fractionalization $F$ ('f'{name}'')'}) star[i].show_model_numbers(False) star[i].custom_columns(['Voice', 'Political stability', 'Govern-t
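Editorial note on the IV specifications above: they all follow the same pattern, where the endogenous segregation measure sits in the exogenous design and the instrument matrix is the same design with that column swapped for its instrument. A minimal self-contained sketch of that IV2SLS call pattern with synthetic data (the variable names and numbers here are illustrative, not from the replication dataset):

# Minimal sketch of the IV2SLS call pattern used above: the instrument matrix
# is the exog design with the endogenous regressor replaced by its instrument.
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.regression.gmm import IV2SLS

rng = np.random.default_rng(0)
n = 200
z = rng.normal(size=n)                              # instrument
u = rng.normal(size=n)                              # unobserved confounder
x_endog = 0.8 * z + 0.5 * u + rng.normal(size=n)    # endogenous regressor
w = rng.normal(size=n)                              # exogenous control
y = 1.0 + 2.0 * x_endog + 0.5 * w + u + rng.normal(size=n)

data = pd.DataFrame({'y': y, 'x_endog': x_endog, 'w': w, 'z': z})
exog = sm.add_constant(data[['x_endog', 'w']])
instrument = sm.add_constant(data[['z', 'w']])      # same design, endogenous column swapped

iv_fit = IV2SLS(data['y'], exog, instrument).fit()
print(iv_fit.params)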
not None: _dict['imported'] = self.imported if hasattr(self, 'parameter_sets_total') and self.parameter_sets_total is not None: _dict['parameter_sets_total'] = self.parameter_sets_total if hasattr(self, 'pending') and self.pending is not None: _dict['pending'] = self.pending if hasattr(self, 'renamed') and self.renamed is not None: _dict['renamed'] = self.renamed if hasattr(self, 'replaced') and self.replaced is not None: _dict['replaced'] = self.replaced if hasattr(self, 'sequence_jobs_total') and self.sequence_jobs_total is not None: _dict['sequence_jobs_total'] = self.sequence_jobs_total if hasattr(self, 'skipped') and self.skipped is not None: _dict['skipped'] = self.skipped if hasattr(self, 'subflows_total') and self.subflows_total is not None: _dict['subflows_total'] = self.subflows_total if hasattr(self, 'table_definitions_total') and self.table_definitions_total is not None: _dict['table_definitions_total'] = self.table_definitions_total if hasattr(self, 'total') and self.total is not None: _dict['total'] = self.total if hasattr(self, 'unsupported') and self.unsupported is not None: _dict['unsupported'] = self.unsupported return _dict def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() def __str__(self) -> str: """Return a `str` version of this ImportCount object.""" return json.dumps(self.to_dict(), indent=2) def __eq__(self, other: 'ImportCount') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ def __ne__(self, other: 'ImportCount') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other class ImportFlow(): """ Import flow object. :attr str conflict_resolution_status: (optional) conflict resolution status. :attr datetime end_time: (optional) The timestamp when the flow import is completed. In format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date-time format as specified by RFC 3339. :attr List[DataImportError] errors: (optional) The errors array report all the problems preventing the data flow from being successfully imported. :attr str id: (optional) Unique id of the data flow. This field is returned only if the underlying data flow has been successfully imported. :attr str job_id: (optional) Unique id of the job. This field is returned only if the corresponding job object has been successfully created. :attr str job_name: (optional) Job name. This field is returned only if the corresponding job object has been successfully created. :attr str job_type: (optional) (deprecated) original type of the job or data flow in the import file. :attr str name: Name of the imported data flow. :attr str original_name: (optional) Name of the data flow to be imported. :attr str ref_asset_id: (optional) The ID of an existing asset this object refers to. If ref_asset_id is specified, the id field will be the same as ref_asset_id for backward compatibility. :attr str status: data import status. :attr str type: (optional) type of the job or data connection in the import file. :attr List[ImportFlowWarning] warnings: (optional) The warnings array report all the warnings in the data flow import operation. 
""" def __init__(self, name: str, status: str, *, conflict_resolution_status: str = None, end_time: datetime = None, errors: List['DataImportError'] = None, id: str = None, job_id: str = None, job_name: str = None, job_type: str = None, original_name: str = None, ref_asset_id: str = None, type: str = None, warnings: List['ImportFlowWarning'] = None) -> None: """ Initialize a ImportFlow object. :param str name: Name of the imported data flow. :param str status: data import status. :param str conflict_resolution_status: (optional) conflict resolution status. :param datetime end_time: (optional) The timestamp when the flow import is completed. In format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date-time format as specified by RFC 3339. :param List[DataImportError] errors: (optional) The errors array report all the problems preventing the data flow from being successfully imported. :param str id: (optional) Unique id of the data flow. This field is returned only if the underlying data flow has been successfully imported. :param str job_id: (optional) Unique id of the job. This field is returned only if the corresponding job object has been successfully created. :param str job_name: (optional) Job name. This field is returned only if the corresponding job object has been successfully created. :param str job_type: (optional) (deprecated) original type of the job or data flow in the import file. :param str original_name: (optional) Name of the data flow to be imported. :param str ref_asset_id: (optional) The ID of an existing asset this object refers to. If ref_asset_id is specified, the id field will be the same as ref_asset_id for backward compatibility. :param str type: (optional) type of the job or data connection in the import file. :param List[ImportFlowWarning] warnings: (optional) The warnings array report all the warnings in the data flow import operation. 
""" self.conflict_resolution_status = conflict_resolution_status self.end_time = end_time self.errors = errors self.id = id self.job_id = job_id self.job_name = job_name self.job_type = job_type self.name = name self.original_name = original_name self.ref_asset_id = ref_asset_id self.status = status self.type = type self.warnings = warnings @classmethod def from_dict(cls, _dict: Dict) -> 'ImportFlow': """Initialize a ImportFlow object from a json dictionary.""" args = {} if 'conflict_resolution_status' in _dict: args['conflict_resolution_status'] = _dict.get('conflict_resolution_status') if 'end_time' in _dict: args['end_time'] = string_to_datetime(_dict.get('end_time')) if 'errors' in _dict: args['errors'] = [DataImportError.from_dict(x) for x in _dict.get('errors')] if 'id' in _dict: args['id'] = _dict.get('id') if 'job_id' in _dict: args['job_id'] = _dict.get('job_id') if 'job_name' in _dict: args['job_name'] = _dict.get('job_name') if 'job_type' in _dict: args['job_type'] = _dict.get('job_type') if 'name' in _dict: args['name'] = _dict.get('name') else: raise ValueError('Required property \'name\' not present in ImportFlow JSON') if 'original_name' in _dict: args['original_name'] = _dict.get('original_name') if 'ref_asset_id' in _dict: args['ref_asset_id'] = _dict.get('ref_asset_id') if 'status' in _dict: args['status'] = _dict.get('status') else: raise ValueError('Required property \'status\' not present in ImportFlow JSON') if 'type' in _dict: args['type'] = _dict.get('type') if 'warnings' in _dict: args['warnings'] = [ImportFlowWarning.from_dict(x) for x in _dict.get('warnings')] return cls(**args) @classmethod def _from_dict(cls, _dict): """Initialize a ImportFlow object from a json dictionary.""" return cls.from_dict(_dict) def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'conflict_resolution_status') and self.conflict_resolution_status is not None: _dict['conflict_resolution_status'] = self.conflict_resolution_status if hasattr(self, 'end_time') and self.end_time is not None: _dict['end_time'] = datetime_to_string(self.end_time) if hasattr(self, 'errors') and self.errors is not None: _dict['errors'] = [x.to_dict() for x in self.errors] if hasattr(self, 'id') and self.id is not None: _dict['id'] = self.id if hasattr(self, 'job_id') and self.job_id is not None: _dict['job_id'] = self.job_id if hasattr(self, 'job_name') and self.job_name is not None: _dict['job_name'] = self.job_name if hasattr(self, 'job_type') and self.job_type is not None: _dict['job_type'] = self.job_type if hasattr(self, 'name') and self.name is not None: _dict['name'] = self.name if hasattr(self, 'original_name') and self.original_name is not None: _dict['original_name'] = self.original_name if hasattr(self, 'ref_asset_id') and self.ref_asset_id is not None: _dict['ref_asset_id'] = self.ref_asset_id if hasattr(self, 'status') and self.status is not None: _dict['status'] = self.status if hasattr(self, 'type') and self.type is not None: _dict['type'] = self.type if hasattr(self, 'warnings') and self.warnings is not None: _dict['warnings'] = [x.to_dict() for x in self.warnings] return _dict def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() def __str__(self) -> str: """Return a `str` version of this ImportFlow object.""" return json.dumps(self.to_dict(), indent=2) def __eq__(self, other: 'ImportFlow') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, 
self.__class__): return False return self.__dict__ == other.__dict__ def __ne__(self, other: 'ImportFlow') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other class ConflictResolutionStatusEnum(str, Enum): """ conflict resolution status. """ FLOW_REPLACEMENT_SUCCEEDED = 'flow_replacement_succeeded' FLOW_REPLACEMENT_FAILED = 'flow_replacement_failed' IMPORT_FLOW_RENAMED = 'import_flow_renamed' IMPORT_FLOW_SKIPPED = 'import_flow_skipped' CONNECTION_REPLACEMENT_SUCCEEDED = 'connection_replacement_succeeded' CONNECTION_REPLACEMENT_FAILED = 'connection_replacement_failed' CONNECTION_RENAMED = 'connection_renamed' CONNECTION_SKIPPED = 'connection_skipped' PARAMETER_SET_REPLACEMENT_SUCCEEDED = 'parameter_set_replacement_succeeded' PARAMETER_SET_REPLACEMENT_FAILED = 'parameter_set_replacement_failed' PARAMETER_SET_RENAMED = 'parameter_set_renamed' PARAMETER_SET_SKIPPED = 'parameter_set_skipped' TABLE_DEFINITION_REPLACEMENT_SUCCEEDED = 'table_definition_replacement_succeeded' TABLE_DEFINITION_REPLACEMENT_FAILED = 'table_definition_replacement_failed' TABLE_DEFINITION_RENAMED = 'table_definition_renamed' TABLE_DEFINITION_SKIPPED = 'table_definition_skipped' SEQUENCE_JOB_REPLACEMENT_SUCCEEDED = 'sequence_job_replacement_succeeded' SEQUENCE_JOB_REPLACEMENT_FAILED = 'sequence_job_replacement_failed' SEQUENCE_JOB_RENAMED = 'sequence_job_renamed' SEQUENCE_JOB_SKIPPED = 'sequence_job_skipped' SUBFLOW_REPLACEMENT_SUCCEEDED = 'subflow_replacement_succeeded' SUBFLOW_REPLACEMENT_FAILED = 'subflow_replacement_failed' SUBFLOW_RENAMED = 'subflow_renamed' SUBFLOW_SKIPPED = 'subflow_skipped' class JobTypeEnum(str, Enum): """ (deprecated) original type of the job or data flow in the import file. """ PX_JOB = 'px_job' SERVER_JOB = 'server_job' CONNECTION = 'connection' TABLE_DEF = 'table_def' class StatusEnum(str, Enum): """ data import status. """ COMPLETED = 'completed' IN_PROGRESS = 'in_progress' FAILED = 'failed' SKIPPED = 'skipped' DEPRECATED = 'deprecated' UNSUPPORTED = 'unsupported' FLOW_CONVERSION_FAILED = 'flow_conversion_failed' FLOW_CREATION_FAILED =
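Editorial note: as a usage sketch for the serialization helpers defined on ImportFlow above (assumes the class is in scope from this module; the field values are illustrative), from_dict validates the required fields and to_dict round-trips back to a plain dictionary:

# Usage sketch for the ImportFlow helpers above (illustrative values only).
payload = {
    'name': 'my_flow',                                    # required
    'status': 'completed',                                # required
    'conflict_resolution_status': 'import_flow_renamed',  # optional enum value
}

flow = ImportFlow.from_dict(payload)         # raises ValueError if name/status missing
assert flow.to_dict()['name'] == 'my_flow'   # round-trips to a plain dict
print(flow)                                  # __str__ renders the dict as indented JSON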
map from the field to the index of the significant filters index_map = { "te": 0, "rna": 1, "ribo": 2 } index = index_map[field] if significant_only: index += 3 return filters[index] @ribo_deprecated def get_up_and_down_masks(condition_1, condition_2, pval_df): """ This function finds all of the transcripts which are, respectively higher or lower in the first condition. That is, "up" and "down" are respective to condition_1. This function is meant to be used with the output of the estimate-kl-pvalues script from the ribo-te package. Args: condition_1, condition_2 (strings): the name of the conditions pval_df (pd.DataFrame): a dataframe, which is just the output of the estimate-kl-pvalues script Returns: All of the return values are boolean masks of pval_df. m_te_up, m_te_down: The transcripts which have higher or lower TE in the first condition, respectively. m_rna_up, m_rna_down: The transcripts which have higher or lower RNA-seq RPKM in the first condition, respectively. m_ribo_up, m_ribo_down: The transcripts which have higher or lower riboseq RPKM in the first condition, respectively. """ import numpy as np te_1 = 'log_translational_efficiency_loc_{}'.format(condition_1) te_2 = 'log_translational_efficiency_loc_{}'.format(condition_2) rna_1 = 'rna_abundance_mean_loc_{}'.format(condition_1) rna_2 = 'rna_abundance_mean_loc_{}'.format(condition_2) ribo_1 = 'ribo_abundance_mean_loc_{}'.format(condition_1) ribo_2 = 'ribo_abundance_mean_loc_{}'.format(condition_2) m_te_up = pval_df[te_1] > pval_df[te_2] m_te_down = ~m_te_up m_rna_up = pval_df[rna_1] > pval_df[rna_2] m_rna_down = ~m_rna_up m_ribo_up = pval_df[ribo_1] > pval_df[ribo_2] m_ribo_down = ~m_ribo_up up_down_masks = m_te_up, m_te_down, m_rna_up, m_rna_down, m_ribo_up, m_ribo_down up_down_masks = [ np.array(f) for f in up_down_masks ] return up_down_masks @ribo_deprecated def get_up_down_filter(filters, field, direction): """ This function returns the appropriate mask to filter on the given field in the given direction. It assumes the filters are in the same order as the output of get_up_and_down_masks. Parameters ---------- filters : tuple The result of the call to get_up_and_down_masks field : string The name of the field on which to filter. Valid options are: * ribo * rna * te direction : string The direction in which to filter. Valid options are: * up * down Returns ------- significance_mask : boolean mask The appropriate mask for filtering for significance based on the given field. """ # just map from the field to the index of the significant filters field_map = { "te": 0, "rna": 2, "ribo": 4 } direction_map = { "up": 0, "down": 1 } index = field_map[field] + direction_map[direction] return filters[index] def melt_te_df(te): """ Melt a data frame from the translational efficiency estimations to a long df suitable for use with seaborn, etc. 
""" # we only want to keep the mean and var estimates mean_fields_to_keep = [mean_field_map[f] for f in fields] var_fields_to_keep = [var_field_map[f] for f in fields] fields_to_keep = mean_fields_to_keep + var_fields_to_keep # but we need this as a hierarchical index mean_fields = [(f, 'mean') for f in fields] var_fields = [(f, 'var') for f in fields] hierarchical_fields = mean_fields + var_fields # drop the rest of the fields, except gene_id te_df = te.set_index('gene_id') te_df = te_df[fields_to_keep] # add the multi-index for the columns te_df.columns = pd.MultiIndex.from_tuples(hierarchical_fields) # bring the gene_id back te_df = te_df.stack(level=0) te_df.index.names = ["gene_id", "field"] te_df = te_df.reset_index(drop=False) # go ahead and add the pretty name te_df['field_name'] = te_df['field'].map(field_map) return te_df def get_bitseq_estimates( config, isoform_strategy, bitseq_id_field='transcript_id', strings_to_remove=['.cds-only', '.merged']): """ Load the bitseq abundance estimates into a single long data frame. Parameters ---------- config: dict-like The configuration for the project, presumably from the yaml file isoform_strategy: str The strategy for handling transcript isoforms bitseq_id_field: str Name for the "transcript_id" field (second column) in bitseq tr file strings_to_remove: list of strings A list of strings to replace with "" in the bitseq ids Returns ------- bitseq_estimates: pd.DataFrame A data frame containing the following columns * rpkm_{mean,var}: the bitseq estimates * sample: the name of the respective sample * type: "ribo" or "rna" """ import bio_utils.bio as bio import tqdm msg = "Reading the bitseq tr info file" logger.info(msg) # check which transcript file to load is_merged = False if isoform_strategy == "merged": is_merged = True # and get the file transcript_fasta = filenames.get_transcript_fasta( config['genome_base_path'], config['genome_name'], is_annotated=True, is_merged=is_merged, is_cds_only=True ) tr_info = filenames.get_bitseq_transcript_info(transcript_fasta) bitseq_tr = bio.read_bitseq_tr_file(tr_info) # we need to remove all of the indicated strings from the ids for to_remove in strings_to_remove: tids = bitseq_tr['transcript_id'].str.replace(to_remove, "") bitseq_tr['transcript_id'] = tids bitseq_tr = bitseq_tr.rename(columns={'transcript_id': bitseq_id_field}) note = config.get('note', None) all_dfs = [] msg = "Reading riboseq BitSeq estimates" logger.info(msg) is_unique = 'keep_riboseq_multimappers' not in config it = tqdm.tqdm(config['riboseq_samples'].items()) for name, file in it: lengths, offsets = get_periodic_lengths_and_offsets( config, name, isoform_strategy=isoform_strategy, is_unique=is_unique ) bitseq_rpkm_mean = filenames.get_riboseq_bitseq_rpkm_mean( config['riboseq_data'], name, is_unique=is_unique, is_transcriptome=True, is_cds_only=True, length=lengths, offset=offsets, isoform_strategy=isoform_strategy, note=note ) field_names = ['rpkm_mean', 'rpkm_var'] bitseq_rpkm_mean_df = bio.read_bitseq_means( bitseq_rpkm_mean, names=field_names ) bitseq_rpkm_mean_df['sample'] = name bitseq_rpkm_mean_df['type'] = 'ribo' bitseq_rpkm_mean_df[bitseq_id_field] = bitseq_tr[bitseq_id_field] all_dfs.append(bitseq_rpkm_mean_df) # now, the rnaseq msg = "Reading RNA-seq BitSeq estimates" logger.info(msg) is_unique = ('remove_rnaseq_multimappers' in config) it = tqdm.tqdm(config['rnaseq_samples'].items()) for name, data in it: bitseq_rpkm_mean = filenames.get_rnaseq_bitseq_rpkm_mean( config['rnaseq_data'], name, is_unique=is_unique, 
is_transcriptome=True, is_cds_only=True, isoform_strategy=isoform_strategy, note=note ) field_names = ['rpkm_mean', 'rpkm_var'] bitseq_rpkm_mean_df = bio.read_bitseq_means( bitseq_rpkm_mean, names=field_names ) bitseq_rpkm_mean_df['sample'] = name bitseq_rpkm_mean_df['type'] = 'rna' bitseq_rpkm_mean_df[bitseq_id_field] = bitseq_tr[bitseq_id_field] all_dfs.append(bitseq_rpkm_mean_df) msg = "Joining estimates into long data frame" logger.info(msg) long_df = pd.concat(all_dfs) long_df = long_df.reset_index(drop=True) return long_df def update_gene_id_from_transcript_id(df:pd.DataFrame, config:dict, args=None): """ Assuming "gene_id" is actually a transcript id, replace it with the actual gene identifier. This function is used in the case of the "all" isoform strategy when downstream analysis actually needs a gene identifier. Parameters ---------- df: pd.DataFrame A data frame which contains a "gene_id" field which actually contains transcript identifiers. For example, the latter parts of the B-tea pipeline produce data frames like this with the "all" isoform strategy config: dict Configuration options args: argparse.Namespace or None The logging options from the command line. pyensembl likes to overwrite these, so they will be reset. Returns ------- updated_df: pd.DataFrame A data frame in which the 'gene_id' column is moved to a 'transcript_id' column, and the 'gene_id' column is updated to include actual gene identifiers """ import bio_utils.pyensembl_utils as pyensembl_utils msg = "Loading Ensembl annotations" logger.info(msg) ensembl = pyensembl_utils.get_genome( config['genome_name'], config['gtf'], logging_args = args ) msg = "Finding the gene ids for each transcript id" logger.info(msg) gene_ids = set(df['gene_id']) transcript_gene_mapping = pyensembl_utils.get_gene_ids_of_transcript_ids( gene_ids, ensembl) msg = "Adding gene ids to data frame" logger.info(msg) df['transcript_id'] = df['gene_id'] df = df.drop('gene_id', 1) df = df.merge(transcript_gene_mapping, on='transcript_id') return df ### # These are functions for retrieving the dominant isoform for # each gene and condition. ### def _get_matching_condition(row, condition_field, config): condition = row[condition_field] field = row['field'] # use the ribo conditions for te if field == "te": field = "ribo" return get_criterion_condition(condition, field, config) def _add_matching_conditions(pvalues, config): """ Add the "matching" conditions for both conditions. """ import misc.parallel as parallel # turn off logging; we already know we have matching conditions logger_level = logger.getEffectiveLevel() logger.setLevel("WARNING") matching_condition_1 = parallel.apply_df_simple( pvalues, _get_matching_condition, "condition_1", config ) matching_condition_2 = parallel.apply_df_simple( pvalues, _get_matching_condition, "condition_2", config ) pvalues['matching_condition_1'] = matching_condition_1 pvalues['matching_condition_2'] = matching_condition_2 logger.setLevel(logger_level) return pvalues def _add_transcript_id(pvalues, abundances): """ Use the gene ID an dominant isoform information to pull back the transcript id for each "matching" condition. 
""" left_on=['matching_condition_1', 'gene_id', 'field'] right_on=['condition', 'gene_id', 'field'] pvalues = pvalues.merge(abundances, left_on=left_on, right_on=right_on) pvalues = pvalues.rename(columns={"transcript_id": "transcript_id_1"}) pvalues = pvalues.drop('condition', 1) left_on=['matching_condition_2', 'gene_id', 'field'] pvalues = pvalues.merge(abundances, left_on=left_on, right_on=right_on) pvalues = pvalues.rename(columns={"transcript_id": "transcript_id_2"}) pvalues = pvalues.drop('condition', 1) return pvalues def get_dominant_transcript_ids(pvalues:pd.DataFrame, config:dict, args): """ Add the transcript id for the dominant isoform in each condition. This function is really only intended to be used with the final pvalues data frame from B-tea. """ # now, we need to get the transcript ids for condition_1 and condition_2 abundance_fields_to_keep = [ 'type', 'transcript_id', 'gene_id', 'condition' ] msg = "Reading abundances" logger.info(msg) note
56, timeout = '0', ), query_log_file = '0', remote_read = [ kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_remote_read.com_coreos_monitoring_v1_Prometheus_spec_remoteRead( bearer_token = '0', bearer_token_file = '0', name = '0', oauth2 = kubernetes.client.models.com_coreos_monitoring_v1_pod_monitor_spec_oauth2.com_coreos_monitoring_v1_PodMonitor_spec_oauth2( kubernetes.client_id = kubernetes.client.models.com_coreos_monitoring_v1_pod_monitor_spec_oauth2_client_id.com_coreos_monitoring_v1_PodMonitor_spec_oauth2_clientId(), kubernetes.client_secret = kubernetes.client.models.com_coreos_monitoring_v1_pod_monitor_spec_oauth2_client_secret.com_coreos_monitoring_v1_PodMonitor_spec_oauth2_clientSecret( key = '0', name = '0', optional = True, ), endpoint_params = { 'key' : '0' }, scopes = [ '0' ], token_url = '0', ), proxy_url = '0', read_recent = True, remote_timeout = '0', required_matchers = { 'key' : '0' }, url = '0', ) ], remote_write = [ kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_remote_write.com_coreos_monitoring_v1_Prometheus_spec_remoteWrite( bearer_token = '0', bearer_token_file = '0', headers = { 'key' : '0' }, metadata_config = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_metadata_config.com_coreos_monitoring_v1_Prometheus_spec_metadataConfig( send = True, send_interval = '0', ), name = '0', proxy_url = '0', queue_config = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_queue_config.com_coreos_monitoring_v1_Prometheus_spec_queueConfig( batch_send_deadline = '0', capacity = 56, max_backoff = '0', max_retries = 56, max_samples_per_send = 56, max_shards = 56, min_backoff = '0', min_shards = 56, ), remote_timeout = '0', send_exemplars = True, url = '0', write_relabel_configs = [ kubernetes.client.models.com_coreos_monitoring_v1_pod_monitor_spec_metric_relabelings.com_coreos_monitoring_v1_PodMonitor_spec_metricRelabelings( action = '0', modulus = 56, regex = '0', replacement = '0', separator = '0', source_labels = [ '0' ], target_label = '0', ) ], ) ], replica_external_label_name = '0', replicas = 56, resources = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_resources_1.com_coreos_monitoring_v1_Alertmanager_spec_resources_1(), retention = '0', retention_size = '0', route_prefix = '0', rule_namespace_selector = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_rule_namespace_selector.com_coreos_monitoring_v1_Prometheus_spec_ruleNamespaceSelector(), rule_selector = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_rule_selector.com_coreos_monitoring_v1_Prometheus_spec_ruleSelector(), rules = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_rules.com_coreos_monitoring_v1_Prometheus_spec_rules( alert = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_rules_alert.com_coreos_monitoring_v1_Prometheus_spec_rules_alert( for_grace_period = '0', for_outage_tolerance = '0', resend_delay = '0', ), ), scrape_interval = '0', scrape_timeout = '0', secrets = [ '0' ], security_context = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_security_context_1.com_coreos_monitoring_v1_Alertmanager_spec_securityContext_1( fs_group = 56, fs_group_change_policy = '0', run_as_group = 56, run_as_non_root = True, run_as_user = 56, supplemental_groups = [ 56 ], sysctls = [ kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_security_context_1_sysctls.com_coreos_monitoring_v1_Alertmanager_spec_securityContext_1_sysctls( 
name = '0', value = '0', ) ], ), service_account_name = '0', service_monitor_namespace_selector = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_service_monitor_namespace_selector.com_coreos_monitoring_v1_Prometheus_spec_serviceMonitorNamespaceSelector(), service_monitor_selector = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_service_monitor_selector.com_coreos_monitoring_v1_Prometheus_spec_serviceMonitorSelector(), sha = '0', shards = 56, storage = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_storage.com_coreos_monitoring_v1_Prometheus_spec_storage( disable_mount_sub_path = True, empty_dir = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_storage_empty_dir.com_coreos_monitoring_v1_Alertmanager_spec_storage_emptyDir( medium = '0', size_limit = kubernetes.client.models.size_limit.sizeLimit(), ), volume_claim_template = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_storage_volume_claim_template.com_coreos_monitoring_v1_Alertmanager_spec_storage_volumeClaimTemplate( api_version = '0', kind = '0', status = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_storage_volume_claim_template_status.com_coreos_monitoring_v1_Alertmanager_spec_storage_volumeClaimTemplate_status( access_modes = [ '0' ], capacity = { 'key' : None }, conditions = [ kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_storage_volume_claim_template_status_conditions.com_coreos_monitoring_v1_Alertmanager_spec_storage_volumeClaimTemplate_status_conditions( last_probe_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), last_transition_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), message = '0', reason = '0', status = '0', type = '0', ) ], phase = '0', ), ), ), tag = '0', thanos = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_thanos.com_coreos_monitoring_v1_Prometheus_spec_thanos( base_image = '0', grpc_server_tls_config = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_thanos_grpc_server_tls_config.com_coreos_monitoring_v1_Prometheus_spec_thanos_grpcServerTlsConfig( ca_file = '0', cert_file = '0', insecure_skip_verify = True, key_file = '0', server_name = '0', ), image = '0', listen_local = True, log_format = '0', log_level = '0', min_time = '0', object_storage_config = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_thanos_object_storage_config.com_coreos_monitoring_v1_Prometheus_spec_thanos_objectStorageConfig( key = '0', name = '0', optional = True, ), object_storage_config_file = '0', ready_timeout = '0', sha = '0', tag = '0', tracing_config = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_thanos_tracing_config.com_coreos_monitoring_v1_Prometheus_spec_thanos_tracingConfig( key = '0', name = '0', optional = True, ), tracing_config_file = '0', version = '0', ), tolerations = [ kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_tolerations.com_coreos_monitoring_v1_Alertmanager_spec_tolerations( effect = '0', key = '0', operator = '0', toleration_seconds = 56, value = '0', ) ], topology_spread_constraints = [ kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_topology_spread_constraints.com_coreos_monitoring_v1_Alertmanager_spec_topologySpreadConstraints( label_selector = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_label_selector.com_coreos_monitoring_v1_Alertmanager_spec_labelSelector(), 
max_skew = 56, topology_key = '0', when_unsatisfiable = '0', ) ], version = '0', volume_mounts = [ kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_volume_mounts.com_coreos_monitoring_v1_Alertmanager_spec_volumeMounts( mount_path = '0', mount_propagation = '0', name = '0', read_only = True, sub_path = '0', sub_path_expr = '0', ) ], volumes = [ kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_volumes.com_coreos_monitoring_v1_Alertmanager_spec_volumes( aws_elastic_block_store = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_aws_elastic_block_store.com_coreos_monitoring_v1_Alertmanager_spec_awsElasticBlockStore( fs_type = '0', partition = 56, read_only = True, volume_id = '0', ), azure_disk = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_azure_disk.com_coreos_monitoring_v1_Alertmanager_spec_azureDisk( caching_mode = '0', disk_name = '0', disk_uri = '0', fs_type = '0', kind = '0', read_only = True, ), azure_file = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_azure_file.com_coreos_monitoring_v1_Alertmanager_spec_azureFile( read_only = True, secret_name = '0', share_name = '0', ), cephfs = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_cephfs.com_coreos_monitoring_v1_Alertmanager_spec_cephfs( monitors = [ '0' ], path = '0', read_only = True, secret_file = '0', user = '0', ), cinder = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_cinder.com_coreos_monitoring_v1_Alertmanager_spec_cinder( fs_type = '0', read_only = True, volume_id = '0', ), csi = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_csi.com_coreos_monitoring_v1_Alertmanager_spec_csi( driver = '0', fs_type = '0', node_publish_secret_ref = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_csi_node_publish_secret_ref.com_coreos_monitoring_v1_Alertmanager_spec_csi_nodePublishSecretRef( name = '0', ), read_only = True, volume_attributes = { 'key' : '0' }, ), downward_api = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_downward_api.com_coreos_monitoring_v1_Alertmanager_spec_downwardAPI( default_mode = 56, items = [ kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_downward_api_items.com_coreos_monitoring_v1_Alertmanager_spec_downwardAPI_items( mode = 56, path = '0', ) ], ), fc = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_fc.com_coreos_monitoring_v1_Alertmanager_spec_fc( fs_type = '0', lun = 56, read_only = True, target_ww_ns = [ '0' ], wwids = [ '0' ], ), flex_volume = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_flex_volume.com_coreos_monitoring_v1_Alertmanager_spec_flexVolume( driver = '0', fs_type = '0', options = { 'key' : '0' }, read_only = True, ), flocker = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_flocker.com_coreos_monitoring_v1_Alertmanager_spec_flocker( dataset_name = '0', dataset_uuid = '0', ), gce_persistent_disk = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_gce_persistent_disk.com_coreos_monitoring_v1_Alertmanager_spec_gcePersistentDisk( fs_type = '0', partition = 56, pd_name = '0', read_only = True, ), git_repo = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_git_repo.com_coreos_monitoring_v1_Alertmanager_spec_gitRepo( directory = '0', repository = '0', revision = '0', ), glusterfs = 
kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_glusterfs.com_coreos_monitoring_v1_Alertmanager_spec_glusterfs( endpoints = '0', path = '0', read_only = True, ), host_path = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_host_path.com_coreos_monitoring_v1_Alertmanager_spec_hostPath( path = '0', type = '0', ), iscsi = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_iscsi.com_coreos_monitoring_v1_Alertmanager_spec_iscsi( chap_auth_discovery = True, chap_auth_session = True, fs_type = '0', initiator_name = '0', iqn = '0', iscsi_interface = '0', lun = 56, portals = [ '0' ], read_only = True, target_portal = '0', ), name = '0', nfs = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_nfs.com_coreos_monitoring_v1_Alertmanager_spec_nfs( path = '0', read_only = True, server = '0', ), persistent_volume_claim = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_persistent_volume_claim.com_coreos_monitoring_v1_Alertmanager_spec_persistentVolumeClaim( claim_name = '0', read_only = True, ), photon_persistent_disk = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_photon_persistent_disk.com_coreos_monitoring_v1_Alertmanager_spec_photonPersistentDisk( fs_type = '0', pd_id = '0', ), portworx_volume = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_portworx_volume.com_coreos_monitoring_v1_Alertmanager_spec_portworxVolume( fs_type = '0', read_only = True, volume_id = '0', ), projected = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_projected.com_coreos_monitoring_v1_Alertmanager_spec_projected( default_mode = 56, sources = [ kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_projected_sources.com_coreos_monitoring_v1_Alertmanager_spec_projected_sources( service_account_token = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_projected_service_account_token.com_coreos_monitoring_v1_Alertmanager_spec_projected_serviceAccountToken( audience = '0', expiration_seconds = 56, path = '0', ), ) ], ), quobyte = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_quobyte.com_coreos_monitoring_v1_Alertmanager_spec_quobyte( group = '0', read_only = True, registry = '0', tenant = '0', user = '0', volume = '0', ), rbd = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_rbd.com_coreos_monitoring_v1_Alertmanager_spec_rbd( fs_type = '0', image = '0', keyring = '0', monitors = [ '0' ], pool = '0', read_only = True, user = '0', ), scale_io = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_scale_io.com_coreos_monitoring_v1_Alertmanager_spec_scaleIO( fs_type = '0', gateway = '0', protection_domain = '0', read_only = True, secret_ref = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_scale_io_secret_ref.com_coreos_monitoring_v1_Alertmanager_spec_scaleIO_secretRef( name = '0', ), ssl_enabled = True, storage_mode = '0', storage_pool = '0', system = '0', volume_name = '0', ), storageos = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_storageos.com_coreos_monitoring_v1_Alertmanager_spec_storageos( fs_type = '0', read_only = True, volume_name = '0', volume_namespace = '0', ), vsphere_volume = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_vsphere_volume.com_coreos_monitoring_v1_Alertmanager_spec_vsphereVolume( fs_type = '0', storage_policy_id = '0', storage_policy_name = '0', volume_path = '0', ), ) ], wal_compression = True, web = 
kubernetes.client.models.com_coreos_monitoring_v1_prometheus_spec_web.com_coreos_monitoring_v1_Prometheus_spec_web( page_title = '0', ), ), status = kubernetes.client.models.com_coreos_monitoring_v1_prometheus_status.com_coreos_monitoring_v1_Prometheus_status( available_replicas = 56, paused = True, replicas = 56, unavailable_replicas = 56, updated_replicas = 56, ), ) ], kind = '0', metadata = kubernetes.client.models.v1_list_meta.V1ListMeta( _continue = '0', remaining_item_count = 56, resource_version = '0', self_link = '0', ) ) else :
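# The fragment above is an auto-generated unit-test fixture that instantiates a
# monitoring.coreos.com/v1 PrometheusList model with every optional field filled in.
# Below is a minimal, hedged sketch of how such a generated model is usually
# exercised in a test: construct it with only a couple of fields and round-trip it
# through to_dict(). It assumes the standard OpenAPI-generator model helpers and the
# upstream kubernetes client's V1ListMeta; the CRD-specific model names above may
# differ between generated client builds.
import unittest
import kubernetes.client


class TestListMetaRoundTrip(unittest.TestCase):
    def test_minimal_construction(self):
        # Only set the fields we care about; everything else keeps its default.
        meta = kubernetes.client.models.v1_list_meta.V1ListMeta(
            resource_version='1',
            remaining_item_count=0,
        )
        # Generated models expose to_dict() for serialization round-trips.
        as_dict = meta.to_dict()
        self.assertEqual(as_dict['resource_version'], '1')
        self.assertEqual(as_dict['remaining_item_count'], 0)


if __name__ == '__main__':
    unittest.main()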
"amino_acid_position": "196" }, { "gene": "TP53", "chromosome_position": "7578406", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-14-4157", "dna_change": "C->T", "amino_acid_mutation_detail": "R175H", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f1a42", "amino_acid_mutation": "H", "chromosome": "17", "amino_acid_position": "175" }, { "gene": "TP53", "chromosome_position": "7578460", "amino_acid_wildtype": "V", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-14-4157", "dna_change": "A->C", "amino_acid_mutation_detail": "V157G", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f1a43", "amino_acid_mutation": "G", "chromosome": "17", "amino_acid_position": "157" }, { "gene": "TP53", "chromosome_position": "7577539", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-15-1444", "dna_change": "G->A", "amino_acid_mutation_detail": "R209W", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f1ad0", "amino_acid_mutation": "W", "chromosome": "17", "amino_acid_position": "209" }, { "gene": "TP53", "chromosome_position": "7578395", "amino_acid_wildtype": "H", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-16-0846", "dna_change": "G->A", "amino_acid_mutation_detail": "H179Y", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f1afd", "amino_acid_mutation": "Y", "chromosome": "17", "amino_acid_position": "179" }, { "gene": "TP53", "chromosome_position": "7578268", "amino_acid_wildtype": "L", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-19-1390", "dna_change": "A->C", "amino_acid_mutation_detail": "L35R", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f1c96", "amino_acid_mutation": "R", "chromosome": "17", "amino_acid_position": "35" }, { "gene": "TP53", "chromosome_position": "7578542", "amino_acid_wildtype": "L", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-19-1390", "dna_change": "G->A", "amino_acid_mutation_detail": "L130F", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f1c97", "amino_acid_mutation": "F", "chromosome": "17", "amino_acid_position": "130" }, { "gene": "TP53", "chromosome_position": "7577548", "amino_acid_wildtype": "G", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-19-2623", "dna_change": "C->T", "amino_acid_mutation_detail": "G245S", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f1e74", "amino_acid_mutation": "S", "chromosome": "17", "amino_acid_position": "245" }, { "gene": "TP53", "chromosome_position": "7577568", "amino_acid_wildtype": "C", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-19-2625", "dna_change": "C->T", "amino_acid_mutation_detail": "C238Y", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f1f09", "amino_acid_mutation": "Y", "chromosome": "17", "amino_acid_position": "238" }, { "gene": "TP53", "chromosome_position": "7578235", "amino_acid_wildtype": "Y", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-19-2625", "dna_change": "T->C", "amino_acid_mutation_detail": "Y205C", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f1f0a", "amino_acid_mutation": "C", "chromosome": "17", "amino_acid_position": "205" }, { "gene": "TP53", "chromosome_position": "7577121", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-26-1442", "dna_change": "G->A", "amino_acid_mutation_detail": "R273C", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f23cb", "amino_acid_mutation": "C", "chromosome": "17", "amino_acid_position": "273" }, { "gene": "TP53", "chromosome_position": 
"7577580", "amino_acid_wildtype": "Y", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-26-5133", "dna_change": "T->C", "amino_acid_mutation_detail": "Y234C", "uniprot_id": "P04637", "_id": "53a20770ab47b322147f243c", "amino_acid_mutation": "C", "chromosome": "17", "amino_acid_position": "234" }, { "gene": "TP53", "chromosome_position": "7578534", "amino_acid_wildtype": "K", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-26-5136", "dna_change": "C->A", "amino_acid_mutation_detail": "K132N", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f2517", "amino_acid_mutation": "N", "chromosome": "17", "amino_acid_position": "132" }, { "gene": "TP53", "chromosome_position": "7577538", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-27-1830", "dna_change": "C->T", "amino_acid_mutation_detail": "R209Q", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f262a", "amino_acid_mutation": "Q", "chromosome": "17", "amino_acid_position": "209" }, { "gene": "TP53", "chromosome_position": "7578203", "amino_acid_wildtype": "V", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-27-1835", "dna_change": "C->T", "amino_acid_mutation_detail": "V216M", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f2781", "amino_acid_mutation": "M", "chromosome": "17", "amino_acid_position": "216" }, { "gene": "TP53", "chromosome_position": "7578217", "amino_acid_wildtype": "T", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-27-1836", "dna_change": "G->A", "amino_acid_mutation_detail": "T211I", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f27c4", "amino_acid_mutation": "I", "chromosome": "17", "amino_acid_position": "211" }, { "gene": "TP53", "chromosome_position": "7577114", "amino_acid_wildtype": "C", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-27-1838", "dna_change": "C->T", "amino_acid_mutation_detail": "C275Y", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f2845", "amino_acid_mutation": "Y", "chromosome": "17", "amino_acid_position": "275" }, { "gene": "TP53", "chromosome_position": "7577094", "amino_acid_wildtype": "R", "mutation_type": "Frame_Shift_Del", "patient_id": "TCGA-27-2519", "dna_change": "G->-", "amino_acid_mutation_detail": "R282fs", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f28e6", "amino_acid_mutation": "fs", "chromosome": "17", "amino_acid_position": "282" }, { "gene": "TP53", "chromosome_position": "7577149", "amino_acid_wildtype": "N", "mutation_type": "Frame_Shift_Del", "patient_id": "TCGA-27-2521", "dna_change": "A->-", "amino_acid_mutation_detail": "N263fs", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f292b", "amino_acid_mutation": "fs", "chromosome": "17", "amino_acid_position": "263" }, { "gene": "TP53", "chromosome_position": "7577120", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-28-1753", "dna_change": "C->T", "amino_acid_mutation_detail": "R273H", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f2b05", "amino_acid_mutation": "H", "chromosome": "17", "amino_acid_position": "273" }, { "gene": "TP53", "chromosome_position": "7577556", "amino_acid_wildtype": "C", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-28-2509", "dna_change": "C->T", "amino_acid_mutation_detail": "C242Y", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f2bfe", "amino_acid_mutation": "Y", "chromosome": "17", "amino_acid_position": "242" }, { "gene": "TP53", "chromosome_position": "7577141", "amino_acid_wildtype": "G", "mutation_type": 
"Missense_Mutation", "patient_id": "TCGA-28-5207", "dna_change": "C->A", "amino_acid_mutation_detail": "G266V", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f2d3b", "amino_acid_mutation": "V", "chromosome": "17", "amino_acid_position": "266" }, { "gene": "TP53", "chromosome_position": "7579312", "amino_acid_wildtype": "T", "mutation_type": "Silent", "patient_id": "TCGA-28-5215", "dna_change": "C->T", "amino_acid_mutation_detail": "T125T", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f2eed", "amino_acid_mutation": "T", "chromosome": "17", "amino_acid_position": "125" }, { "gene": "TP53", "chromosome_position": "7577535", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-28-5216", "dna_change": "C->A", "amino_acid_mutation_detail": "R249M", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f2f2a", "amino_acid_mutation": "M", "chromosome": "17", "amino_acid_position": "249" }, { "gene": "TP53", "chromosome_position": "7577138", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-32-1970", "dna_change": "C->G", "amino_acid_mutation_detail": "R267P", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f3053", "amino_acid_mutation": "P", "chromosome": "17", "amino_acid_position": "267" }, { "gene": "TP53", "chromosome_position": "7577568", "amino_acid_wildtype": "C", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-32-2491", "dna_change": "C->A", "amino_acid_mutation_detail": "C238F", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f329e", "amino_acid_mutation": "F", "chromosome": "17", "amino_acid_position": "238" }, { "gene": "TP53", "chromosome_position": "7578265", "amino_acid_wildtype": "I", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-32-2491", "dna_change": "A->T", "amino_acid_mutation_detail": "I195N", "uniprot_id": "P04637", "_id": "53a20771ab47b322147f329f", "amino_acid_mutation": "N", "chromosome": "17", "amino_acid_position": "195" }, { "gene": "TP53", "chromosome_position": "7577520", "amino_acid_wildtype": "I", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-32-2634", "dna_change": "A->C", "amino_acid_mutation_detail": "I254S", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f34f8", "amino_acid_mutation": "S", "chromosome": "17", "amino_acid_position": "254" }, { "gene": "TP53", "chromosome_position": "7578406", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-32-4208", "dna_change": "C->T", "amino_acid_mutation_detail": "R175H", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f358a", "amino_acid_mutation": "H", "chromosome": "17", "amino_acid_position": "175" }, { "gene": "TP53", "chromosome_position": "7577544", "amino_acid_wildtype": "M", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-32-4210", "dna_change": "A->C", "amino_acid_mutation_detail": "M246R", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f3622", "amino_acid_mutation": "R", "chromosome": "17", "amino_acid_position": "246" }, { "gene": "TP53", "chromosome_position": "7578211", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-74-6573", "dna_change": "C->T", "amino_acid_mutation_detail": "R174Q", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f3b0e", "amino_acid_mutation": "Q", "chromosome": "17", "amino_acid_position": "174" }, { "gene": "TP53", "chromosome_position": "7577535", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-4925", "dna_change": "C->G", 
"amino_acid_mutation_detail": "R249T", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f3c92", "amino_acid_mutation": "T", "chromosome": "17", "amino_acid_position": "249" }, { "gene": "TP53", "chromosome_position": "7574003", "amino_acid_wildtype": "R", "mutation_type": "Nonsense_Mutation", "patient_id": "TCGA-76-4929", "dna_change": "G->A", "amino_acid_mutation_detail": "R342X", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f3dd1", "amino_acid_mutation": "X", "chromosome": "17", "amino_acid_position": "342" }, { "gene": "TP53", "chromosome_position": "7578476", "amino_acid_wildtype": "P", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-4929", "dna_change": "G->A", "amino_acid_mutation_detail": "P152S", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f3dd2", "amino_acid_mutation": "S", "chromosome": "17", "amino_acid_position": "152" }, { "gene": "TP53", "chromosome_position": "7577120", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-4934", "dna_change": "C->T", "amino_acid_mutation_detail": "R273H", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f3eb4", "amino_acid_mutation": "H", "chromosome": "17", "amino_acid_position": "273" }, { "gene": "TP53", "chromosome_position": "7577551", "amino_acid_wildtype": "G", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-6193", "dna_change": "C->T", "amino_acid_mutation_detail": "G244S", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f3fcf", "amino_acid_mutation": "S", "chromosome": "17", "amino_acid_position": "244" }, { "gene": "TP53", "chromosome_position": "7578464", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-6193", "dna_change": "G->A", "amino_acid_mutation_detail": "R156C", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f3fd0", "amino_acid_mutation": "C", "chromosome": "17", "amino_acid_position": "156" }, { "gene": "TP53", "chromosome_position": "7578466", "amino_acid_wildtype": "T", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-6193", "dna_change": "G->T", "amino_acid_mutation_detail": "T155N", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f3fd1", "amino_acid_mutation": "N", "chromosome": "17", "amino_acid_position": "155" }, { "gene": "TP53", "chromosome_position": "7578457", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-6283", "dna_change": "C->T", "amino_acid_mutation_detail": "R158H", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f40a7", "amino_acid_mutation": "H", "chromosome": "17", "amino_acid_position": "158" }, { "gene": "TP53", "chromosome_position": "7572986", "amino_acid_wildtype": "Q", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-6286", "dna_change": "G->T", "amino_acid_mutation_detail": "Q375K", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f414b", "amino_acid_mutation": "K", "chromosome": "17", "amino_acid_position": "375" }, { "gene": "TP53", "chromosome_position": "7577580", "amino_acid_wildtype": "Y", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-6286", "dna_change": "T->C", "amino_acid_mutation_detail": "Y234C", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f414c", "amino_acid_mutation": "C", "chromosome": "17", "amino_acid_position": "234" }, { "gene": "TP53", "chromosome_position": "7578518", "amino_acid_wildtype": "A", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-6662", "dna_change": "C->T", "amino_acid_mutation_detail": "A138T", "uniprot_id": "P04637", "_id": 
"53a20772ab47b322147f4309", "amino_acid_mutation": "T", "chromosome": "17", "amino_acid_position": "138" }, { "gene": "TP53", "chromosome_position": "7577538", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-6663", "dna_change": "C->A", "amino_acid_mutation_detail": "R209L", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f4346", "amino_acid_mutation": "L", "chromosome": "17", "amino_acid_position": "209" }, { "gene": "TP53", "chromosome_position": "7577539", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-76-6664", "dna_change": "G->A", "amino_acid_mutation_detail": "R209W", "uniprot_id": "P04637", "_id": "53a20772ab47b322147f438f", "amino_acid_mutation": "W", "chromosome": "17", "amino_acid_position": "209" } ] }, { "tumor_type": "KIRC", "mutations": [ { "gene": "TP53", "chromosome_position": "7577120", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-A3-3308", "dna_change": "C->T", "amino_acid_mutation_detail": "R273H", "uniprot_id": "P04637", "_id": "53a20790ab47b32217429c3f", "amino_acid_mutation": "H", "chromosome": "17", "amino_acid_position": "273" }, { "gene": "TP53", "chromosome_position": "7577120", "amino_acid_wildtype": "R", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-A3-3308", "dna_change": "C->T", "amino_acid_mutation_detail": "R273H", "uniprot_id": "P04637", "_id": "53a20790ab47b32217429d22", "amino_acid_mutation": "H", "chromosome": "17", "amino_acid_position": "273" }, { "gene": "TP53", "chromosome_position": "7577547", "amino_acid_wildtype": "G", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-A3-3319", "dna_change": "C->T", "amino_acid_mutation_detail": "G245D", "uniprot_id": "P04637", "_id": "53a20790ab47b3221742a1ce", "amino_acid_mutation": "D", "chromosome": "17", "amino_acid_position": "245" }, { "gene": "TP53", "chromosome_position": "7579396", "amino_acid_wildtype": "V", "mutation_type": "Silent", "patient_id": "TCGA-B0-5092", "dna_change": "G->A", "amino_acid_mutation_detail": "V97V", "uniprot_id": "P04637", "_id": "53a20793ab47b3221742c8d6", "amino_acid_mutation": "V", "chromosome": "17", "amino_acid_position": "97" }, { "gene": "TP53", "chromosome_position": "7578212", "amino_acid_wildtype": "R", "mutation_type": "Nonsense_Mutation", "patient_id": "TCGA-B0-5098", "dna_change": "G->A", "amino_acid_mutation_detail": "R174X", "uniprot_id": "P04637", "_id": "53a20793ab47b3221742ccbd", "amino_acid_mutation": "X", "chromosome": "17", "amino_acid_position": "174" }, { "gene": "TP53", "chromosome_position": "7578272", "amino_acid_wildtype": "H", "mutation_type": "Missense_Mutation", "patient_id": "TCGA-B0-5117", "dna_change": "G->A", "amino_acid_mutation_detail": "H193Y", "uniprot_id": "P04637", "_id": "53a20793ab47b3221742d076", "amino_acid_mutation": "Y", "chromosome":
import yaml import papermill as pm import math import pkg_resources from jinja2 import Environment, FileSystemLoader import yaml import argparse import json import glob import shutil import sys import os #_________________________________________________________________________________________________ # open htnl template file #try: pkg_path = os.environ['PATH_TRIPYVIEW'] #except: pkg_path='' pkg_path = os.path.dirname(os.path.dirname(__file__)) templates_path = os.path.join(pkg_path,'templates_html') templates_nb_path = os.path.join(pkg_path,'templates_notebooks') file_loader = FileSystemLoader(templates_path) env = Environment(loader=file_loader) # # #_______________________________________________________________________________________________________ def drive_hslice(yaml_settings, analysis_name): print(' --> drive_hslice:',analysis_name) # copy yaml settings for analysis driver --> hslice: # driver_settings = yaml_settings[analysis_name].copy() # create current primary parameter from yaml settings current_params = {} for key, value in yaml_settings.items(): # if value is a dictionary its not a primary paramter anymore e.g. # hslice: --> dict(...) # temp: # levels: [-2, 30, 41] # depths: [0, 100, 400, 1000] # .... if isinstance(value, dict): pass else: current_params[key] = value # initialse webpage for analyis webpage = {} image_count = 0 # loop over variable name for vname in driver_settings: # loop over depths for depth in driver_settings[vname]["depths"]: current_params2 = {} current_params2 = current_params.copy() current_params2["vname"] = vname current_params2["depth"] = depth current_params2.update(driver_settings[vname]) del current_params2["depths"] # --> delete depth list [0, 100, 1000,...] from current_param dict() #__________________________________________________________________________________________ if 'proj' in current_params2.keys(): save_fname = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{current_params2['proj']}_{depth}.png" save_fname_nb = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{current_params2['proj']}_{depth}.ipynb" short_name = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{current_params2['proj']}_{depth}" else: save_fname = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{depth}.png" save_fname_nb = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{depth}.ipynb" short_name = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{depth}" current_params2["save_fname"] = os.path.join(yaml_settings['save_path_fig'], save_fname) #__________________________________________________________________________________________ pm.execute_notebook( f"{templates_nb_path}/template_hslice.ipynb", os.path.join(yaml_settings['save_path_nb'], save_fname_nb), parameters=current_params2, nest_asyncio=True, ) #__________________________________________________________________________________________ webpage[f"image_{image_count}"] = {} webpage[f"image_{image_count}"]["name"] = f"{vname.capitalize()} at {depth} m" webpage[f"image_{image_count}"]["path"] = os.path.join('./figures/', save_fname) webpage[f"image_{image_count}"]["path_nb"] = os.path.join('./notebooks/', save_fname_nb) webpage[f"image_{image_count}"]["short_name"] = short_name image_count += 1 return webpage # # #_______________________________________________________________________________________________________ def drive_hslice_clim(yaml_settings, analysis_name): # copy yaml settings for analysis driver --> hslice: # driver_settings = 
yaml_settings[analysis_name].copy() # create current primary parameter from yaml settings current_params = {} for key, value in yaml_settings.items(): # if value is a dictionary its not a primary paramter anymore e.g. # hslice: --> dict(...) # temp: # levels: [-2, 30, 41] # depths: [0, 100, 400, 1000] # .... if isinstance(value, dict): pass else: current_params[key] = value # initialse webpage for analyis webpage = {} image_count = 0 # loop over variable name for vname in driver_settings: print(f' --> compute:{vname}') # loop over depths for depth in driver_settings[vname]["depths"]: current_params2 = {} current_params2 = current_params.copy() current_params2["vname"] = vname current_params2["depth"] = depth current_params2.update(driver_settings[vname]) del current_params2["depths"] # --> delete depth list [0, 100, 1000,...] from current_param dict() # print(current_params2) #__________________________________________________________________________________________ if 'proj' in current_params2.keys(): save_fname = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{current_params2['proj']}_{depth}.png" save_fname_nb = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{current_params2['proj']}_{depth}.ipynb" short_name = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{current_params2['proj']}_{depth}" else: save_fname = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{depth}.png" save_fname_nb = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{depth}.ipynb" short_name = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{depth}" current_params2["save_fname"] = os.path.join(yaml_settings['save_path_fig'], save_fname) #__________________________________________________________________________________________ pm.execute_notebook( f"{templates_nb_path}/template_hslice_clim.ipynb", os.path.join(yaml_settings['save_path_nb'], save_fname_nb), parameters=current_params2, nest_asyncio=True, ) #__________________________________________________________________________________________ webpage[f"image_{image_count}"] = {} webpage[f"image_{image_count}"]["name"] = f"{vname.capitalize()} at {depth} m" webpage[f"image_{image_count}"]["path"] = os.path.join('./figures/', save_fname) webpage[f"image_{image_count}"]["path_nb"] = os.path.join('./notebooks/', save_fname_nb) webpage[f"image_{image_count}"]["short_name"] = short_name image_count += 1 return webpage # # #_______________________________________________________________________________________________________ def drive_hovm(yaml_settings, analysis_name): # copy yaml settings for analysis driver --> hslice: # driver_settings = yaml_settings[analysis_name].copy() # create current primary parameter from yaml settings current_params = {} for key, value in yaml_settings.items(): # if value is a dictionary its not a primary paramter anymore e.g. # hslice: --> dict(...) # temp: # levels: [-2, 30, 41] # depths: [0, 100, 400, 1000] # .... 
if isinstance(value, dict): pass else: current_params[key] = value # initialse webpage for analyis webpage = {} image_count = 0 # loop over variable name for vname in driver_settings: print(f' --> compute: {vname}') # loop over depths for box_region in driver_settings[vname]["box_regions"]: print(f' --> compute: {box_region}') current_params2 = {} current_params2 = current_params.copy() current_params2["vname"] = vname current_params2["box_region"] = list([box_region]) current_params2.update(driver_settings[vname]) del current_params2["box_regions"] # --> delete depth list [0, 100, 1000,...] from current_param dict() str_boxregion = box_region.split('/')[-1].split('.')[0] #__________________________________________________________________________________________ save_fname = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{str_boxregion}.png" save_fname_nb = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{str_boxregion}.ipynb" current_params2["save_fname"] = os.path.join(yaml_settings['save_path_fig'], save_fname) #__________________________________________________________________________________________ pm.execute_notebook( f"{templates_nb_path}/template_hovm.ipynb", os.path.join(yaml_settings['save_path_nb'], save_fname_nb), parameters=current_params2, nest_asyncio=True, ) #__________________________________________________________________________________________ webpage[f"image_{image_count}"] = {} webpage[f"image_{image_count}"][ "name" ] = f"{vname.capitalize()} at {str_boxregion} m" webpage[f"image_{image_count}"]["path"] = os.path.join('./figures/', save_fname) webpage[f"image_{image_count}"]["path_nb"] = os.path.join('./notebooks/', save_fname_nb) webpage[f"image_{image_count}"][ "short_name" ] = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{str_boxregion}" image_count += 1 return webpage # # #_______________________________________________________________________________________________________ def drive_hovm_clim(yaml_settings, analysis_name): # copy yaml settings for analysis driver --> hslice: # driver_settings = yaml_settings[analysis_name].copy() # create current primary parameter from yaml settings current_params = {} for key, value in yaml_settings.items(): # if value is a dictionary its not a primary paramter anymore e.g. # hslice: --> dict(...) # temp: # levels: [-2, 30, 41] # depths: [0, 100, 400, 1000] # .... if isinstance(value, dict): pass else: current_params[key] = value # initialse webpage for analyis webpage = {} image_count = 0 # loop over variable name for vname in driver_settings: print(f' --> compute: {vname}') # loop over depths for box_region in driver_settings[vname]["box_regions"]: print(f' --> compute: {box_region}') current_params2 = {} current_params2 = current_params.copy() current_params2["vname"] = vname current_params2["box_region"] = list([box_region]) current_params2.update(driver_settings[vname]) del current_params2["box_regions"] # --> delete depth list [0, 100, 1000,...] 
from current_param dict() str_boxregion = box_region.split('/')[-1].split('.')[0] #__________________________________________________________________________________________ save_fname = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{str_boxregion}.png" save_fname_nb = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{str_boxregion}.ipynb" current_params2["save_fname"] = os.path.join(yaml_settings['save_path_fig'], save_fname) #__________________________________________________________________________________________ pm.execute_notebook( f"{templates_nb_path}/template_hovm_clim.ipynb", os.path.join(yaml_settings['save_path_nb'], save_fname_nb), parameters=current_params2, nest_asyncio=True, ) #__________________________________________________________________________________________ webpage[f"image_{image_count}"] = {} webpage[f"image_{image_count}"][ "name" ] = f"{vname.capitalize()} at {str_boxregion} m" webpage[f"image_{image_count}"]["path"] = os.path.join('./figures/', save_fname) webpage[f"image_{image_count}"]["path_nb"] = os.path.join('./notebooks/', save_fname_nb) webpage[f"image_{image_count}"][ "short_name" ] = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{str_boxregion}" image_count += 1 return webpage # # #_______________________________________________________________________________________________________ def drive_xmoc(yaml_settings, analysis_name): # copy yaml settings for analysis driver --> hslice: # driver_settings = yaml_settings[analysis_name].copy() # create current primary parameter from yaml settings current_params = {} for key, value in yaml_settings.items(): # if value is a dictionary its not a primary paramter anymore e.g. # hslice: --> dict(...) # temp: # levels: [-2, 30, 41] # depths: [0, 100, 400, 1000] # .... 
if isinstance(value, dict): pass else: current_params[key] = value # initialse webpage for analyis webpage = {} image_count = 0 # loop over variable name for vname in driver_settings: print(f' --> compute: {vname}') current_params2 = {} current_params2 = current_params.copy() current_params2["vname"] = vname # current_params2.update(driver_settings[vname]) #__________________________________________________________________________________________ save_fname = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}.png" save_fname_nb = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}.ipynb" current_params2["save_fname"] = os.path.join(yaml_settings['save_path_fig'], save_fname) #__________________________________________________________________________________________ pm.execute_notebook( f"{templates_nb_path}/template_xmoc.ipynb", os.path.join(yaml_settings['save_path_nb'], save_fname_nb), parameters=current_params2, nest_asyncio=True) #__________________________________________________________________________________________ webpage[f"image_{image_count}"] = {} webpage[f"image_{image_count}"]["name"] = f"{vname.upper()}" webpage[f"image_{image_count}"]["path"] = os.path.join('./figures/', save_fname) webpage[f"image_{image_count}"]["path_nb"] = os.path.join('./notebooks/', save_fname_nb) webpage[f"image_{image_count}"]["short_name"] = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}" image_count += 1 return webpage # # #_______________________________________________________________________________________________________ def drive_xmoc_tseries(yaml_settings, analysis_name): # copy yaml settings for analysis driver --> hslice: # driver_settings = yaml_settings[analysis_name].copy() # create current primary parameter from yaml settings current_params = {} for key, value in yaml_settings.items(): # if value is a dictionary its not a primary paramter anymore e.g. # hslice: --> dict(...) # temp: # levels: [-2, 30, 41] # depths: [0, 100, 400, 1000] # .... 
if isinstance(value, dict): pass else: current_params[key] = value # initialse webpage for analyis webpage = {} image_count = 0 # loop over variable name for which_lat in driver_settings['which_lats']: print(f' --> compute tseries @: {str(which_lat)}') current_params2 = {} current_params2 = current_params.copy() current_params2["which_lat"] = [which_lat] current_params2.update(driver_settings) del current_params2["which_lats"] #__________________________________________________________________________________________ save_fname = f"{yaml_settings['workflow_name']}_{analysis_name}_{str(which_lat)}.png" save_fname_nb = f"{yaml_settings['workflow_name']}_{analysis_name}_{str(which_lat)}.ipynb" current_params2["save_fname"] = os.path.join(yaml_settings['save_path_fig'], save_fname) #__________________________________________________________________________________________ pm.execute_notebook( f"{templates_nb_path}/template_xmoc_tseries.ipynb", os.path.join(yaml_settings['save_path_nb'], save_fname_nb), parameters=current_params2, nest_asyncio=True) #__________________________________________________________________________________________ webpage[f"image_{image_count}"] = {} if which_lat == 'max': webpage[f"image_{image_count}"]["name"] = f" max AMOC @ 30°N<lat<45°N" else: webpage[f"image_{image_count}"]["name"] = f" AMOC @ {str(which_lat)}°N" webpage[f"image_{image_count}"]["path"] = os.path.join('./figures/', save_fname) webpage[f"image_{image_count}"]["path_nb"] = os.path.join('./notebooks/', save_fname_nb) webpage[f"image_{image_count}"]["short_name"] = f"{yaml_settings['workflow_name']}_{analysis_name}_{str(which_lat)}" image_count += 1 return webpage # # #_______________________________________________________________________________________________________ def drive_vprofile(yaml_settings, analysis_name): # copy yaml settings for analysis driver --> hslice: # driver_settings = yaml_settings[analysis_name].copy() # create current primary parameter from yaml settings current_params = {} for key, value in yaml_settings.items(): # if value is a dictionary its not a primary paramter anymore e.g. # hslice: --> dict(...) # temp: # levels: [-2, 30, 41] # depths: [0, 100, 400, 1000] # .... if isinstance(value, dict): pass else: current_params[key] = value # initialse webpage for analyis webpage = {} image_count = 0 # loop over variable name for vname in driver_settings: print(f' --> compute: {vname}') current_params2 = {} current_params2 = current_params.copy() current_params2["vname"] = vname current_params2.update(driver_settings[vname]) #__________________________________________________________________________________________ save_fname = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}.png" save_fname_nb
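# The drive_* functions above all repeat the same pattern: take the scalar
# (non-dict) yaml settings as base notebook parameters, merge the per-variable
# driver settings, execute one template notebook per parameter combination with
# papermill, and record the rendered figure/notebook for the html page builder.
# A condensed sketch of that shared pattern follows; run_template and its
# arguments are illustrative names, not part of tripyview itself.
import os
import papermill as pm


def run_template(yaml_settings, analysis_name, template_nb, vname, extra_params, label):
    # scalar yaml entries become the base notebook parameters
    params = {k: v for k, v in yaml_settings.items() if not isinstance(v, dict)}
    params.update(extra_params)
    params['vname'] = vname

    stem = f"{yaml_settings['workflow_name']}_{analysis_name}_{vname}_{label}"
    params['save_fname'] = os.path.join(yaml_settings['save_path_fig'], f'{stem}.png')

    # execute the parameterized template notebook (same call as in the drivers above)
    pm.execute_notebook(
        template_nb,
        os.path.join(yaml_settings['save_path_nb'], f'{stem}.ipynb'),
        parameters=params,
        nest_asyncio=True,
    )

    # entry in the webpage dictionary consumed by the html templates
    return {
        'name': f'{vname.capitalize()} ({label})',
        'path': os.path.join('./figures/', f'{stem}.png'),
        'path_nb': os.path.join('./notebooks/', f'{stem}.ipynb'),
        'short_name': stem,
    }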
# File: threatminerapi_connector.py # # Copyright (c) 2019 Splunk Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific language governing permissions # and limitations under the License. # # # Phantom App imports import phantom.app as phantom from phantom.base_connector import BaseConnector from phantom.action_result import ActionResult # Usage of the consts file is recommended # from threatminerapi_consts import * import requests import json import time import ipaddress from bs4 import BeautifulSoup class RetVal(tuple): def __new__(cls, val1, val2=None): return tuple.__new__(RetVal, (val1, val2)) class ThreatminerApiConnector(BaseConnector): def __init__(self): # Call the BaseConnectors init first super(ThreatminerApiConnector, self).__init__() self._state = None # Variable to hold a base_url in case the app makes REST calls # Do note that the app json defines the asset config, so please # modify this as you deem fit. self._base_url = None def _is_ip(self, input_ip_address): """ Function that checks given address and return True if address is valid IPv4 or IPV6 address. :param input_ip_address: IP address :return: status (success/failure) """ ip_address_input = input_ip_address try: ipaddress.ip_address(unicode(ip_address_input)) except: return False return True def _process_empty_reponse(self, response, action_result): if response.status_code == 200: return RetVal(phantom.APP_SUCCESS, {}) return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"), None) def _process_html_response(self, response, action_result): # An html response, treat it like an error status_code = response.status_code try: soup = BeautifulSoup(response.text, "html.parser") error_text = soup.text split_lines = error_text.split('\n') split_lines = [x.strip() for x in split_lines if x.strip()] error_text = '\n'.join(split_lines) except: error_text = "Cannot parse error details" message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code, error_text) message = message.replace('{', '{{').replace('}', '}}') return RetVal(action_result.set_status(phantom.APP_ERROR, message), None) def _process_json_response(self, r, action_result): # Try a json parse try: resp_json = r.json() except Exception as e: return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. Error: {0}".format(str(e))), None) # Please specify the status codes here if 200 <= r.status_code < 399: return RetVal(phantom.APP_SUCCESS, resp_json) # You should process the error returned in the json message = "Error from server. 
Status Code: {0} Data from server: {1}".format( r.status_code, r.text.replace('{', '{{').replace('}', '}}')) return RetVal(action_result.set_status(phantom.APP_ERROR, message), None) def _process_response(self, r, action_result): # store the r_text in debug data, it will get dumped in the logs if the action fails if hasattr(action_result, 'add_debug_data'): action_result.add_debug_data({'r_status_code': r.status_code}) action_result.add_debug_data({'r_text': r.text}) action_result.add_debug_data({'r_headers': r.headers}) # Process each 'Content-Type' of response separately # Process a json response if 'json' in r.headers.get('Content-Type', ''): return self._process_json_response(r, action_result) # Process an HTML resonse, Do this no matter what the api talks. # There is a high chance of a PROXY in between phantom and the rest of # world, in case of errors, PROXY's return HTML, this function parses # the error and adds it to the action_result. if 'html' in r.headers.get('Content-Type', ''): return self._process_html_response(r, action_result) # it's not content-type that is to be parsed, handle an empty response if not r.text: return self._process_empty_reponse(r, action_result) # everything else is actually an error at this point message = "Can't process response from server. Status Code: {0} Data from server: {1}".format( r.status_code, r.text.replace('{', '{{').replace('}', '}}')) return RetVal(action_result.set_status(phantom.APP_ERROR, message), None) def _make_rest_call(self, endpoint, action_result, headers=None, params=None, data=None, method="get"): config = self.get_config() resp_json = None try: request_func = getattr(requests, method) except AttributeError: return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)), resp_json) # Create a URL to connect to url = '{0}{1}'.format(self._base_url, endpoint) try: r = request_func( url, # auth=(username, password), # basic authentication data=data, headers=headers, verify=config.get('verify_server_cert', False), params=params) except Exception as e: return RetVal(action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. Details: {0}".format(str(e))), resp_json) return self._process_response(r, action_result) def _handle_test_connectivity(self, param): # Add an action result object to self (BaseConnector) to represent the action for this param action_result = self.add_action_result(ActionResult(dict(param))) # Create a new Threat Miner Python Object endpoint = 'domain.php?q=vwrm.com&rt=1' # Make test connection to the test connectivity endpoint ret_val, response = self._make_rest_call(endpoint, action_result) # Connect to Phantom Endpoint self.save_progress("Connecting to endpoint") if (phantom.is_fail(ret_val)): # the call to the 3rd party device or service failed, action result should contain all the error details # so just return from here self.save_progress("Test Connectivity Failed") message = "Test Connectivity Failed" return action_result.set_status(phantom.APP_ERROR, status_message=message) # Return success self.save_progress("Test Connectivity Passed") return action_result.set_status(phantom.APP_SUCCESS) def _handle_lookup_domain(self, param): # Implement the handler here # use self.save_progress(...) 
to send progress messages back to the platform self.save_progress("In action handler for: {0}".format(self.get_action_identifier())) # Add an action result object to self (BaseConnector) to represent the action for this param action_result = self.add_action_result(ActionResult(dict(param))) # Access the domain parameter domain = param['domain'] # Create a new Threat Miner Python Object endpoint = 'domain.php?q={}&rt={}'.format(domain, 2) # Issue request to get_domain function ret_val, response = self._make_rest_call(endpoint, action_result) # If the result fails if (phantom.is_fail(ret_val)): # the call to the 3rd party device or service failed, action result should contain all the error details # so just return from here message = ("Lookup Domain at endpoint: {} " "request received a non 200 response".format(endpoint)) return action_result.set_status(phantom.APP_ERROR, status_message=message) # Create new python dictionary to store output data_output = response # Add the response into the data section action_result.add_data(data_output) # Add a dictionary that is made up of the most important values from data into the summary summary = action_result.update_summary({}) summary['domain'] = domain summary['status_message'] = data_output['status_message'] # Return success, no need to set the message, only the status # BaseConnector will create a textual message based off of the summary dictionary return action_result.set_status(phantom.APP_SUCCESS) def _handle_lookup_hash(self, param): # Implement the handler here # use self.save_progress(...) to send progress messages back to the platform self.save_progress("In action handler for: {0}".format(self.get_action_identifier())) # Add an action result object to self (BaseConnector) to represent the action for this param action_result = self.add_action_result(ActionResult(dict(param))) # Access Hash parameter hash_value = param['hash'] # Access Hash Type hash_type = param['hash_type'] if hash_type not in["cryptographic_hash", "ssdeep", "imphash"]: message = ('Invalid hash type, acceptable values ["cryptographic_hash", "ssdeep", "imphash"]') return action_result.set_status(phantom.APP_ERROR, status_message=message) # List the available functions that the hash function provides in the threatMiner API crypto_hash_functions = { 1: "metadata", 2: "http_traffic", 3: "hosts_domains_and_ips", 4: "mutants", 5: "registry_keys", 6: "av_detections", 7: "report_tagging" } # List the available functions that the fuzzy hashing provides in the threatMiner API fuzzy_hash_functions = { 1: "samples", 2: "report_tagging" } # Create an empty dictionary for pushing results into the result_dictionary = {} # If the hash type is a cryptographic hash if hash_type == "cryptographic_hash": self.save_progress("Cryptographic hash selected") # Iterate through the functions for i in range(1, 8): # Make the API call endpoint = 'sample.php?q={}&rt={}'.format(hash_value, i) # Issue request to get_domain function ret_val, response = self._make_rest_call(endpoint, action_result) # Notify user that the request is progressing self.save_progress("Issuing {} request at endpoint: {}".format(i, endpoint)) if (phantom.is_fail(ret_val)): # the call to the 3rd party device or service failed, action result should contain all the error details # so just return from here message = ("Lookup ssdeep at endpoint: {} " "request received a non 200 response".format(endpoint)) return action_result.set_status(phantom.APP_ERROR, status_message=message) # If the call is successfull if 
int(response['status_code']) == 200: self.save_progress("Received 200 OK from ThreatMiner") # Output the results to a dictionary result_dictionary[crypto_hash_functions[i]] = response # If the call fails else: # Output None to the result_dictionary for this query type result_dictionary[crypto_hash_functions[i]] = None # Notify user that the command is sleeping self.save_progress("Sleeping 8 seconds to avoid API throttling") # Throttle communication to ensure we do not go over 10 requests per minute. time.sleep(8) # Check to see if all the requests result in a not found response complete_result = all(result_dictionary[crypto_hash_functions[i]] is None for i in range(1, 8)) # If the hash type is an ssdeep hash elif hash_type == "ssdeep": self.save_progress("ssdeep hash selected") # Iterate through the functions for i in range(1, 3): # If the call is successful # Make the API
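# The handler above walks ThreatMiner query types 1-7 for a cryptographic hash
# and sleeps between calls to respect the 10-requests-per-minute limit mentioned
# in the comments. A standalone sketch of the same loop outside Phantom; the
# base URL is an assumption about the public ThreatMiner v2 API and would
# normally come from the asset configuration.
import time
import requests

BASE_URL = 'https://api.threatminer.org/v2/'  # assumed public endpoint
CRYPTO_HASH_FUNCTIONS = {
    1: 'metadata', 2: 'http_traffic', 3: 'hosts_domains_and_ips',
    4: 'mutants', 5: 'registry_keys', 6: 'av_detections', 7: 'report_tagging',
}


def lookup_hash(hash_value):
    results = {}
    for rt, name in CRYPTO_HASH_FUNCTIONS.items():
        resp = requests.get(BASE_URL + 'sample.php', params={'q': hash_value, 'rt': rt})
        body = resp.json()
        # ThreatMiner reports its own status_code inside the JSON body
        results[name] = body if int(body.get('status_code', 0)) == 200 else None
        time.sleep(8)  # stay under ~10 requests per minute
    return results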
<filename>docker_stuff/site-packages/partedit.py<gh_stars>0 #!/usr/bin/env python # pylint:disable=C0302 """ An interactive spreadsheet for viewing, editing, and saving partio (bgeo) files. Usage: % partedit [FLAGS] [bgeoFile] Supported FLAGS: -h/--help: Print this help message """ # TODO: # Support for fixed attribute delete and rename # Support for indexed strings # Tighten up particle table whitespace usage (smaller font? popup matrix?) # Performance - delay widget construction # NEXT UP: # - delete fixed attribute # - rename fixed attribute __copyright__ = """ CONFIDENTIAL INFORMATION: This software is the confidential and proprietary information of Walt Disney Animation Studios ("WDAS"). This software may not be used, disclosed, reproduced or distributed for any purpose without prior written authorization and license from WDAS. Reproduction of any section of this software must include this legend and all copyright notices. Copyright Disney Enterprises, Inc. All rights reserved. """ import os, sys, math import partio # pylint:disable=E0611,E0401 from Qt.QtGui import QKeySequence, QIcon, QIntValidator, QDoubleValidator from Qt.QtWidgets import QShortcut, QApplication, QMainWindow, \ QPushButton, QTableWidget, QLabel, QWidget, QVBoxLayout, QHeaderView,\ QHBoxLayout, QLineEdit, QFileDialog, QFrame, QDialog, QFormLayout, \ QComboBox, QCheckBox, QTableWidgetItem, QSplitter from Qt.QtCore import Qt, QSize, QObject#, pyqtSignal from PyQt5.QtCore import pyqtSignal #------------------------------------------------------------------------------_ _attrTypes = [partio.NONE, partio.VECTOR, partio.FLOAT, partio.INT, partio.INDEXEDSTR] #------------------------------------------------------------------------------ def copy(srcData): """ Creates a copy of the given partio data set """ dstData = partio.create() srcAttrs = [] dstAttrs = [] for anum in range(srcData.numAttributes()): attr = srcData.attributeInfo(anum) srcAttrs.append(attr) dstAttrs.append(dstData.addAttribute(attr.name, attr.type, attr.count)) dstData.addParticles(srcData.numParticles()) for pnum in range(srcData.numParticles()): for anum, srcAttr in enumerate(srcAttrs): dstData.set(dstAttrs[anum], pnum, srcData.get(srcAttr, pnum)) return dstData #-------------------------------------------------------------------------- def getAttrs(numAttributesFunc, attributeInfoFunc, sort=False): """ Return list of tuples of (attributeNum, attribute) """ attrs = [] numAttr = numAttributesFunc() nameToIndex = {attributeInfoFunc(anum).name:anum for anum in range(numAttr)} names = nameToIndex.keys() if sort: names.sort() id_offset = 0 for name in names: anum = nameToIndex[name] attr = attributeInfoFunc(anum) if sort and attr.name == 'id': attrs.insert(0, (anum, attr)) id_offset += 1 elif sort and 'id' in attr.name: attrs.insert(id_offset, (anum, attr)) id_offset += 1 else: attrs.append((anum, attr)) return attrs #-------------------------------------------------------------------------- def copyParticles(src, dst): """ Copy particles from src to dst. 
""" # Identify the attributes that are in both src and dst srcAttrs = [src.attributeInfo(i) for i in range(src.numAttributes())] dstAttrs = [dst.attributeInfo(i) for i in range(dst.numAttributes())] srcAttrs = {attr.name:attr for attr in srcAttrs} dstAttrs = {attr.name:attr for attr in dstAttrs} attrs = {'src':[], 'dst':[]} for name, srcAttr in srcAttrs.iteritems(): if name in dstAttrs: attrs['src'].append(srcAttr) attrs['dst'].append(dstAttrs[name]) numParticles = src.numParticles() dst.addParticles(numParticles) for pnum in range(numParticles): for anum in range(len(attrs)): dst.set(attrs['dst'][anum], pnum, src.get(attrs['src'][anum], pnum)) #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ class ParticleData(QObject): """ UI Controller class for partio data """ particleAdded = pyqtSignal(int) attributeAdded = pyqtSignal(str) fixedAttributeAdded = pyqtSignal(str) dataReset = pyqtSignal() dirtied = pyqtSignal(bool) def __init__(self): QObject.__init__(self) self.setData(partio.create()) self.filename = None self.dirty = False #-------------------------------------------------------------------------- def setDirty(self, dirty): """ Stores the dirty state of the data """ if dirty != self.dirty: self.dirty = dirty self.dirtied.emit(dirty) #-------------------------------------------------------------------------- def setData(self, data): """ Sets the data, linking class methods to partio methods and notifying all observers that the data set has changed. """ self.data = data self.originalData = copy(data) self.facade() self.dataReset.emit() #-------------------------------------------------------------------------- def facade(self): """ Facades methods through to data """ self.get = self.data.get self.getFixed = self.data.getFixed self.numAttributes = self.data.numAttributes self.numFixedAttributes = self.data.numFixedAttributes self.numParticles = self.data.numParticles self.attributeInfo = self.data.attributeInfo self.fixedAttributeInfo = self.data.fixedAttributeInfo self.indexedStrs = self.data.indexedStrs #-------------------------------------------------------------------------- def set(self, *args): """ Sets a value on the partio data, marking dirty. """ self.setDirty(True) self.data.set(*args) #-------------------------------------------------------------------------- def setFixed(self, *args): """ Sets a fixed attribute value on the partio data, marking dirty. """ self.setDirty(True) self.data.setFixed(*args) #-------------------------------------------------------------------------- def read(self, filename): """ Opens a file from disk and populates the UI """ if not os.path.exists(filename): sys.stderr.write('Invalid filename: {}\n'.format(filename)) return data = partio.read(filename) if not data: sys.stderr.write('Invalid particle file: {}\n'.format(filename)) data = partio.create() self.filename = filename self.setData(data) self.setDirty(False) #-------------------------------------------------------------------------- def write(self, filename, delta): """ Write data to file. If delta is False, saves a full copy of the data, rebaselining. If delta is True, saves only the particles (todo: and attributes) that have changed, but maintains the original baseline """ if not self.data: return # If we're saving a delta, create a new particle set with just # the differences from the original. 
if delta: data = self.createDelta() else: data = self.data partio.write(filename, data) # If we saved a full copy, rebaseline if not delta: self.filename = filename self.originalData = copy(data) self.setDirty(False) #-------------------------------------------------------------------------- def createDelta(self): """ Creates a delta particle set between the current and original data set. This is the brute-force method, simply comparing the current data set against the original, but it's easier than tracking individual changes. """ def hashParticles(data): """ Given a partio data set, create a dictionary of hashes to indices """ items = {} numAttrs = data.numAttributes() for pnum in range(data.numParticles()): item = [] for anum in range(numAttrs): attr = data.attributeInfo(anum) item.append(data.get(attr, pnum)) items[hash(str(item))] = pnum return items # TODO: Handle new attributes as deltas # For now, any new attributes will write all of the particles # Hash up the new data into an index table newParticles = hashParticles(self.data) oldParticles = hashParticles(self.originalData) # If nothing changed, easy out data = partio.create() if newParticles == oldParticles: return data # Identify which particles changed oldHashes = set(oldParticles.keys()) newHashes = set(newParticles.keys()) modifiedHashes = newHashes - oldHashes # Create the new particle set numAttrs = self.data.numAttributes() newAttrs = [] oldAttrs = [] for anum in range(numAttrs): attr = self.data.attributeInfo(anum) oldAttrs.append(attr) newAttr = data.addAttribute(attr.name, attr.type, attr.count) newAttrs.append(newAttr) data.addParticles(len(modifiedHashes)) for newIndex, modifiedHash in enumerate(modifiedHashes): oldIndex = newParticles[modifiedHash] for anum, oldAttr in enumerate(oldAttrs): value = self.data.get(oldAttr, oldIndex) data.set(newAttrs[anum], newIndex, value) return data #-------------------------------------------------------------------------- def addParticle(self): """ Adds a new particle, emitting its new index. The new particle's values are copied from the last particle. If the particle set has the 'id' attribute, the new particle id is set to max(ids)+1. """ if not self.data: return numParticles = self.numParticles() index = self.data.addParticle() numAttr = self.numAttributes() idAttr = self.attributeInfo('id') if idAttr: newId = max(self.data.get(idAttr, pnum)[0] for pnum in range(numParticles)) + 1 for anum in range(numAttr): attr = self.attributeInfo(anum) if idAttr and attr.name == 'id': value = (newId,) else: value = self.get(attr, numParticles-1) self.set(attr, numParticles, value) self.particleAdded.emit(index) self.setDirty(True) #-------------------------------------------------------------------------- def removeParticles(self, indices): """ Removes the particles at the given indices. 
partio doesn't support removing data, so we have to construct all new data sans the given particle """ for anum in range(self.data.numAttributes()): attr = self.data.attributeInfo(anum) attributes = [self.data.attributeInfo(anum) for anum in range(self.data.numAttributes())] want = [pnum for pnum in range(self.data.numParticles()) if pnum not in indices ] newData = partio.clone(self.data, False) for attr in attributes: newData.addAttribute(attr.name, attr.type, attr.count) newData.addParticles(len(want)) for i, idx in enumerate(want): for attr in attributes: newData.set(attr, i, self.data.get(attr, idx)) self.setData(newData) self.setDirty(True) #-------------------------------------------------------------------------- def addAttribute(self, name, attrType, count, fixed, defaultValue): """ Adds a new attribute for the particles, returning a handle to the new attribute. """ if not isinstance(defaultValue, tuple): defaultValue = (defaultValue,) if fixed: attr = self.data.addFixedAttribute(name, attrType, count) self.data.setFixed(attr, defaultValue) self.fixedAttributeAdded.emit(attr.name) else: attr = self.data.addAttribute(name, attrType, count) for pnum in range(self.numParticles()): self.data.set(attr, pnum, defaultValue) self.attributeAdded.emit(attr.name) self.setDirty(True) #-------------------------------------------------------------------------- def removeAttributes(self, names): """ Removes the attributes with the given names. partio doesn't support removing data, so we have to construct all new data sans the given attribute(s). """ newData = partio.create() for anum in range(self.numAttributes()): attr = self.attributeInfo(anum) if attr.name not in names: newData.addAttribute(attr.name, attr.type, attr.count) # Copy particle data with new attributes copyParticles(src=self.data, dst=newData) # Copy fixed attributes for anum in range(self.data.numFixedAttributes()): oldAttr = self.data.fixedAttributeInfo(anum) newAttr = newData.addFixedAttribute(oldAttr.name, oldAttr.type, oldAttr.count) newData.setFixed(newAttr, self.data.getFixed(oldAttr)) self.setData(newData) self.setDirty(True) #-------------------------------------------------------------------------- def removeFixedAttributes(self, names): """ Removes the fixed attributes with the given names. partio doesn't support removing data, so we have to construct all new data sans the given attribute(s). 
""" newData = partio.create() # Copy the regular (non-fixed) attributes and particles for anum in range(self.data.numAttributes()): attr = self.attributeInfo(anum) newData.addAttribute(attr.name, attr.type, attr.count) copyParticles(src=self.data, dst=newData) # Create new fixed attributes for anum in range(self.data.numFixedAttributes()): srcAttr = self.fixedAttributeInfo(anum) if srcAttr.name not in names: dstAttr = newData.addFixedAttribute(srcAttr.name, srcAttr.type, srcAttr.count) newData.setFixed(dstAttr, self.data.getFixed(srcAttr)) self.setData(newData) self.setDirty(True) #------------------------------------------------------------------------------ class NumericalEdit(QLineEdit): # pylint:disable=R0903 """ A LineEdit that auto installs a validator for numerical types """ def __init__(self, value, parent=None): QLineEdit.__init__(self, str(value), parent) self.setAlignment(Qt.AlignRight) if isinstance(value, int): self.setValidator(QIntValidator()) elif isinstance(value, float): self.setValidator(QDoubleValidator()) #------------------------------------------------------------------------------ class AttrWidget(QFrame): # pylint:disable=R0903 """ The
# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and# # limitations under the License. import copy import time from sqlalchemy import exc from novaclient.v1_1 import client as nova_client from cloudferrylib.base import compute from cloudferrylib.utils import mysql_connector from cloudferrylib.utils import timeout_exception from cloudferrylib.utils import utils as utl LOG = utl.get_log(__name__) DISK = "disk" LOCAL = ".local" LEN_UUID_INSTANCE = 36 INTERFACES = "interfaces" class NovaCompute(compute.Compute): """The main class for working with Openstack Nova Compute Service. """ def __init__(self, config, cloud): super(NovaCompute, self).__init__() self.config = config self.cloud = cloud self.identity = cloud.resources['identity'] self.mysql_connector = mysql_connector.MysqlConnector(config.mysql, 'nova') self.nova_client = self.proxy(self.get_client(), config) def get_client(self, params=None): """Getting nova client. """ params = self.config if not params else params return nova_client.Client(params.cloud.user, params.cloud.password, params.cloud.tenant, "http://%s:35357/v2.0/" % params.cloud.host) def _read_info_quotas(self, info): user_quotas_cmd = ("SELECT user_id, project_id, resource, " "hard_limit FROM project_user_quotas WHERE " "deleted = 0") for quota in self.mysql_connector.execute(user_quotas_cmd): info['user_quotas'].append( {'quota': {'user_id': quota[0], 'project_id': quota[1], 'resource': quota[2], 'hard_limit': quota[3]}, 'meta': {}}) project_quotas_cmd = ("SELECT project_id, resource, hard_limit " "FROM quotas WHERE deleted = 0") for quota in self.mysql_connector.execute(project_quotas_cmd): info['project_quotas'].append( {'quota': {'project_id': quota[0], 'resource': quota[1], 'hard_limit': quota[2]}, 'meta': {}}) def _read_info_resources(self, **kwargs): """ Read info about compute resources except instances from the cloud. """ info = {'keypairs': {}, 'flavors': {}, 'user_quotas': [], 'project_quotas': []} for keypair in self.get_keypair_list(): info['keypairs'][keypair.id] = self.convert(keypair) for flavor in self.get_flavor_list(): info['flavors'][flavor.id] = self.convert(flavor) if self.config.migrate.migrate_quotas: self._read_info_quotas(info) return info def read_info(self, target='instances', **kwargs): """ Read info from cloud. :param target: Target objects to get info about. Possible values: "instances" or "resources", :param search_opts: Search options to filter out servers (optional). 
""" if target == 'resources': return self._read_info_resources(**kwargs) if target != 'instances': raise ValueError('Only "resources" or "instances" values allowed') search_opts = kwargs.get('search_opts') info = {'instances': {}} for instance in self.get_instances_list(search_opts=search_opts): info['instances'][instance.id] = self.convert(instance, self.config, self.cloud) return info @staticmethod def convert_instance(instance, cfg, cloud): identity_res = cloud.resources[utl.IDENTITY_RESOURCE] compute_res = cloud.resources[utl.COMPUTE_RESOURCE] instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name") instance_host = getattr(instance, 'OS-EXT-SRV-ATTR:host') get_tenant_name = identity_res.get_tenants_func() security_groups = [] for security_group in instance.security_groups: security_groups.append(security_group['name']) interfaces = compute_res.get_networks(instance) volumes = [{'id': v.id, 'num_device': i, 'device': v.device} for i, v in enumerate( compute_res.nova_client.volumes.get_server_volumes( instance.id))] is_ephemeral = compute_res.get_flavor_from_id( instance.flavor['id']).ephemeral > 0 is_ceph = cfg.compute.backend.lower() == utl.CEPH direct_transfer = cfg.migrate.direct_compute_transfer if direct_transfer: ext_cidr = cfg.cloud.ext_cidr host = utl.get_ext_ip(ext_cidr, cloud.getIpSsh(), instance_host) elif is_ceph: host = cfg.compute.host_eph_drv else: host = instance_host instance_block_info = utl.get_libvirt_block_info( instance_name, cloud.getIpSsh(), instance_host) ephemeral_path = { 'path_src': None, 'path_dst': None, 'host_src': host, 'host_dst': None } if is_ephemeral: ephemeral_path['path_src'] = utl.get_disk_path( instance, instance_block_info, is_ceph_ephemeral=is_ceph, disk=DISK+LOCAL) diff = { 'path_src': None, 'path_dst': None, 'host_src': host, 'host_dst': None } if instance.image: diff['path_src'] = utl.get_disk_path( instance, instance_block_info, is_ceph_ephemeral=is_ceph) inst = {'instance': {'name': instance.name, 'instance_name': instance_name, 'id': instance.id, 'tenant_id': instance.tenant_id, 'tenant_name': get_tenant_name( instance.tenant_id), 'status': instance.status, 'flavor_id': instance.flavor['id'], 'image_id': instance.image[ 'id'] if instance.image else None, 'boot_mode': (utl.BOOT_FROM_IMAGE if instance.image else utl.BOOT_FROM_VOLUME), 'key_name': instance.key_name, 'availability_zone': getattr( instance, 'OS-EXT-AZ:availability_zone'), 'security_groups': security_groups, 'boot_volume': copy.deepcopy( volumes[0]) if volumes else None, 'interfaces': interfaces, 'host': instance_host, 'is_ephemeral': is_ephemeral, 'volumes': volumes }, 'ephemeral': ephemeral_path, 'diff': diff, 'meta': {}, } return inst @staticmethod def convert_resources(compute_obj): if isinstance(compute_obj, nova_client.keypairs.Keypair): return {'keypair': {'name': compute_obj.name, 'public_key': compute_obj.public_key}, 'meta': {}} elif isinstance(compute_obj, nova_client.flavors.Flavor): return {'flavor': {'name': compute_obj.name, 'ram': compute_obj.ram, 'vcpus': compute_obj.vcpus, 'disk': compute_obj.disk, 'ephemeral': compute_obj.ephemeral, 'swap': compute_obj.swap, 'rxtx_factor': compute_obj.rxtx_factor, 'is_public': compute_obj.is_public}, 'meta': {}} @staticmethod def convert(obj, cfg=None, cloud=None): res_tuple = (nova_client.keypairs.Keypair, nova_client.flavors.Flavor) if isinstance(obj, nova_client.servers.Server): return NovaCompute.convert_instance(obj, cfg, cloud) elif isinstance(obj, res_tuple): return NovaCompute.convert_resources(obj) 
LOG.error('NovaCompute converter has received incorrect value. Please ' 'pass to it only instance, keypair or flavor objects.') return None def _deploy_resources(self, info, **kwargs): """ Deploy compute resources except instances to the cloud. :param info: Info about compute resources to deploy, :param identity_info: Identity info. """ identity_info = kwargs.get('identity_info') tenant_map = {tenant['tenant']['id']: tenant['meta']['new_id'] for tenant in identity_info['tenants']} user_map = {user['user']['id']: user['meta']['new_id'] for user in identity_info['users']} self._deploy_keypair(info['keypairs']) self._deploy_flavors(info['flavors']) if self.config['migrate']['migrate_quotas']: self._deploy_project_quotas(info['project_quotas'], tenant_map) self._deploy_user_quotas(info['user_quotas'], tenant_map, user_map) new_info = self.read_info(target='resources') return new_info def deploy(self, info, target='instances', **kwargs): """ Deploy compute resources to the cloud. :param target: Target objects to deploy. Possible values: "instances" or "resources", :param identity_info: Identity info. """ info = copy.deepcopy(info) if target == 'resources': info = self._deploy_resources(info, **kwargs) elif target == 'instances': info = self._deploy_instances(info) else: raise ValueError('Only "resources" or "instances" values allowed') return info def _deploy_user_quotas(self, quotas, tenant_map, user_map): insert_cmd = ("INSERT INTO project_user_quotas (user_id, project_id, " "resource, hard_limit, deleted) VALUES ('%s', '%s', '%s'" ", %s, 0)") update_cmd = ("UPDATE project_user_quotas SET hard_limit=%s WHERE " "user_id='%s' AND project_id='%s' AND resource='%s' AND " "deleted=0") for _quota in quotas: quota = _quota['quota'] try: self.mysql_connector.execute(insert_cmd % ( user_map[quota['user_id']], tenant_map[quota['project_id']], quota['resource'], quota['hard_limit'])) except exc.IntegrityError as e: if 'Duplicate entry' in e.message: self.mysql_connector.execute(update_cmd % ( quota['hard_limit'], user_map[quota['user_id']], tenant_map[quota['project_id']], quota['resource'], )) else: raise def _deploy_project_quotas(self, quotas, tenant_map): insert_cmd = ("INSERT INTO quotas (project_id, resource, " "hard_limit, deleted) VALUES ('%s', '%s', %s, 0)") update_cmd = ("UPDATE quotas SET hard_limit=%s WHERE project_id='%s' " "AND resource='%s' AND deleted=0") for _quota in quotas: quota = _quota['quota'] try: self.mysql_connector.execute(insert_cmd % ( tenant_map[quota['project_id']], quota['resource'], quota['hard_limit'])) except exc.IntegrityError as e: if 'Duplicate entry' in e.message: self.mysql_connector.execute(update_cmd % ( quota['hard_limit'], tenant_map[quota['project_id']], quota['resource'], )) else: raise def _deploy_keypair(self, keypairs): dest_keypairs = [keypair.name for keypair in self.get_keypair_list()] for _keypair in keypairs.itervalues(): keypair = _keypair['keypair'] if keypair['name'] in dest_keypairs: continue self.create_keypair(keypair['name'], keypair['public_key']) def _deploy_flavors(self, flavors): dest_flavors = {flavor.name: flavor.id for flavor in self.get_flavor_list()} for flavor_id, _flavor in flavors.iteritems(): flavor = _flavor['flavor'] if flavor['name'] in dest_flavors: # _flavor['meta']['dest_id'] = dest_flavors[flavor['name']] _flavor['meta']['id'] = dest_flavors[flavor['name']] continue _flavor['meta']['id'] = self.create_flavor( name=flavor['name'], flavorid=flavor_id, ram=flavor['ram'], vcpus=flavor['vcpus'], disk=flavor['disk'], 
ephemeral=flavor['ephemeral'], swap=int(flavor['swap']) if flavor['swap'] else 0, rxtx_factor=flavor['rxtx_factor'], is_public=flavor['is_public']).id def _deploy_instances(self, info_compute): new_ids = {} nova_tenants_clients = { self.config['cloud']['tenant']: self.nova_client} params = {'user': self.config['cloud']['user'], 'password': <PASSWORD>['<PASSWORD>']['password'], 'tenant': self.config['cloud']['tenant'], 'host': self.config['cloud']['host']} for _instance in info_compute['instances'].itervalues(): tenant_name = _instance['instance']['tenant_name'] if tenant_name not in nova_tenants_clients: params['tenant'] = tenant_name nova_tenants_clients[tenant_name] = self.get_nova_client( params) for _instance in info_compute['instances'].itervalues(): instance = _instance['instance'] meta = _instance['meta'] self.nova_client = nova_tenants_clients[instance['tenant_name']] create_params = {'name': instance['name'], 'flavor': instance['flavor_id'], 'key_name': instance['key_name'], 'availability_zone': instance[ 'availability_zone'], 'nics': instance['nics'], 'image': instance['image_id']} if instance['boot_mode'] == utl.BOOT_FROM_VOLUME: volume_id = instance['volumes'][0]['id'] create_params["block_device_mapping_v2"] = [{ "source_type": "volume", "uuid": volume_id, "destination_type": "volume", "delete_on_termination": True, "boot_index": 0 }] create_params['image'] = None new_id = self.create_instance(**create_params) new_ids[new_id] = instance['id'] self.nova_client = nova_tenants_clients[self.config['cloud']['tenant']] return new_ids def create_instance(self, **kwargs): return self.nova_client.servers.create(**kwargs).id def get_instances_list(self, detailed=True, search_opts=None, marker=None, limit=None): ids = search_opts.get('id', None) if search_opts else None if not ids: return self.nova_client.servers.list(detailed=detailed, search_opts=search_opts, marker=marker, limit=limit) else: if type(ids) is list: return [self.nova_client.servers.get(i) for i in ids] else: return [self.nova_client.servers.get(ids)] def get_instance(self, instance_id): return self.get_instances_list(search_opts={'id': instance_id})[0] def change_status(self, status, instance=None, instance_id=None): if instance_id: instance = self.nova_client.servers.get(instance_id) curr = self.get_status(self.nova_client.servers, instance.id).lower() will = status.lower() func_restore = { 'start': lambda instance: instance.start(), 'stop': lambda instance: instance.stop(), 'resume': lambda instance: instance.resume(), 'paused': lambda instance: instance.pause(), 'unpaused': lambda instance: instance.unpause(), 'suspend': lambda instance: instance.suspend(), 'status': lambda status: lambda instance: self.wait_for_status( instance_id, status) } map_status = { 'paused': { 'active': (func_restore['unpaused'], func_restore['status']('active')), 'shutoff': (func_restore['stop'], func_restore['status']('shutoff')), 'suspend': (func_restore['unpaused'], func_restore['status']('active'), func_restore['suspend'], func_restore['status']('suspend')) }, 'suspend': { 'active': (func_restore['resume'], func_restore['status']('active')), 'shutoff': (func_restore['stop'], func_restore['status']('shutoff')), 'paused': (func_restore['resume'], func_restore['status']('active'), func_restore['paused'], func_restore['status']('paused')) }, 'active': { 'paused': (func_restore['paused'], func_restore['status']('paused')), 'suspend': (func_restore['suspend'], func_restore['status']('suspend')), 'shutoff': (func_restore['stop'], 
func_restore['status']('shutoff')) }, 'shutoff': { 'active': (func_restore['start'], func_restore['status']('active')), 'paused': (func_restore['start'], func_restore['status']('active'), func_restore['paused'], func_restore['status']('paused')), 'suspend': (func_restore['start'], func_restore['status']('active'), func_restore['suspend'], func_restore['status']('suspend')) } } if curr != will: try: reduce(lambda res, f: f(instance), map_status[curr][will], None) except timeout_exception.TimeoutException as e: return e else: return True def wait_for_status(self, id_obj, status, limit_retry=90): count = 0 getter = self.nova_client.servers while getter.get(id_obj).status.lower() != status.lower(): time.sleep(2) count += 1 if count > limit_retry: raise timeout_exception.TimeoutException( getter.get(id_obj).status.lower(), status, "Timeout exp") def get_flavor_from_id(self, flavor_id): return self.nova_client.flavors.get(flavor_id) def get_flavor_list(self, **kwargs): return self.nova_client.flavors.list(**kwargs) def create_flavor(self, **kwargs): return self.nova_client.flavors.create(**kwargs) def delete_flavor(self, flavor_id): self.nova_client.flavors.delete(flavor_id) def get_keypair_list(self): return self.nova_client.keypairs.list() def get_keypair(self, name): return self.nova_client.keypairs.get(name) def
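# -----------------------------------------------------------------------------
# Hedged sketch of the (current_status, wanted_status) -> actions table that
# NovaCompute.change_status() above walks with reduce(): each entry is a tuple
# of callables applied in order to the instance.  ToyInstance stands in for a
# Nova server object and wait_for() for the real wait_for_status() polling, so
# everything below is illustrative rather than the actual client API.
from functools import reduce

class ToyInstance(object):
    def __init__(self):
        self.status = 'shutoff'

    def start(self):
        self.status = 'active'

    def pause(self):
        self.status = 'paused'

def wait_for(status):
    """In the real code this polls Nova until the status matches or times out."""
    def check(instance):
        assert instance.status == status, instance.status
    return check

TRANSITIONS = {
    ('shutoff', 'paused'): (lambda inst: inst.start(), wait_for('active'),
                            lambda inst: inst.pause(), wait_for('paused')),
}

def change_status(instance, wanted):
    current = instance.status
    if current != wanted:
        reduce(lambda _, step: step(instance), TRANSITIONS[(current, wanted)], None)
    return instance.status

if __name__ == "__main__":
    print(change_status(ToyInstance(), 'paused'))  # -> paused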
<filename>PYTHON/get_era5.py #!/usr/bin/python # Code to read in years of hourly high resolution ERA5 t, td and land_sea_mask data # Converts to q, RH, e, tw and DPD # Aggregates to daily average # Regrids to 1by1 degree and shifts to -179.5 ot 179.5 and 180 lats from 89.5 to -89.5 (was 181 lats!!!) # Outputs as netCDF # Later code will read in and convert to pentads, monthlies, anomalies etc and combine to make complete record or # append the latest year # This can also spin through all years or cope with annual updates #******************************************* # START #******************************************* import os import datetime as dt import calendar import numpy as np import sys import time import pdb import iris import iris.coord_categorisation from iris.coords import DimCoord from iris.cube import Cube import cf_units import CalcHums as ch #import utils #sys.path.append('/data/users/rdunn/reanalyses/code/era5/cdsapi-0.1.4') #sys.path.append('/data/users/hadkw/WORKING_HADISDH/UPDATE2019/PROGS/PYTHON/cdsapi-0.1.4') sys.path.append('/home/h04/hadkw/HadISDH_Code/HADISDH_BUILD/cdsapi-0.1.4') import cdsapi # Set up directory #DataLoc = '/data/users/hadkw/WORKING_HADISDH/UPDATE2019/OTHERDATA/ERA5/' DataLoc = '/scratch/hadkw/UPDATE2020/OTHERDATA/ERA5/' print(DataLoc) """ Butchered from http://fcm1.metoffice.com/projects/utils/browser/CM_ML/trunk/NAO_Precip_Regr/get_era5_uwind.py and /data/users/rdunn/reanalysis/era5/get_era.py """ #**************************************** def retrieve(year, variable, month, ndays): ''' Use ECMWF API to get the data 4.5GB per month --> 55GB per year, 50mins per month of processing ''' if variable == "2m_temperature": varlist = ["2m_temperature"] # varlist = ["2m_temperature", "land_sea_mask"] # if you want to download both at once elif variable == "2m_dewpoint_temperature": varlist = ["2m_dewpoint_temperature"] # varlist = ["2m_dewpoint_temperature", "land_sea_mask"] elif variable == "surface_pressure": varlist = ["surface_pressure"] elif variable == "land_sea_mask": varlist = ["land_sea_mask"] else: print("please provide correct variable to download") return days = ["{:2d}".format(d+1) for d in range(ndays)] c = cdsapi.Client() c.retrieve( 'reanalysis-era5-single-levels', { 'product_type':'reanalysis', 'format':'netcdf', 'variable':varlist, 'year':"{}".format(year), 'month':"{:02d}".format(month), 'day':days, 'time':[ '00:00','01:00','02:00', '03:00','04:00','05:00', '06:00','07:00','08:00', '09:00','10:00','11:00', '12:00','13:00','14:00', '15:00','16:00','17:00', '18:00','19:00','20:00', '21:00','22:00','23:00', ] }, os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable)) ) # time.sleep(5) # to allow any writing process to finish up. 
# # make a "success" file # with open(os.path.join(DataLoc, "{}{:02d}_{}_success.txt".format(year, month, variable)), "w") as outfile: # # outfile.write("Success {}".format(dt.datetime.now())) return # retreive #**************************************** def check_files(year, variable, month, ndays): ''' This reads in the t, td and p files and checks for full download ''' ''' If the last hour field has identical values for every lat and lon box then it has failed ''' ''' A failed file is removed and program aborts ''' ''' Program will need to be restarted ''' action = 'contrinue' # output if file is ok test_cube = iris.load(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable))) # convert from list to cube test_cube = test_cube[0] test_data = test_cube.data[-1,:,:] # Are all values the same - np.unique() has a length of 1 if so if (len(np.unique(test_cube.data[-1,:,:])) == 1): # remove failed files os.remove(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable))) action = 'retrieve' # either retry download or exit code depending on stage in process ## exit the program #sys.exit('Incomplete file download') return action #**************************************** def convert(year, month, ndays, remove=False): """ Now need to: - convert to q, RH , e, tw, DPD - aggregate to daily averages - regrid to 1by1 gridboxes """ MDI = -999. # Set up null_cube with desired gridding format to use as a template # Does this have to have the same time dimensions? # ndays = np.int(p_cube.data[:,0,0] / 24) time = DimCoord(np.arange(ndays*24), standard_name = 'time', units = 'hours') latitude = DimCoord(np.linspace(89.5, -89.5, 180), # latitude = DimCoord(np.linspace(90, -90, 181), standard_name = 'latitude', long_name = 'gridbox centre latitude', units = 'degrees_north') longitude = DimCoord(np.linspace(-179.5, 179.5, 360), # longitude = DimCoord(np.linspace(0, 359, 360), standard_name='longitude', long_name = 'gridbox centre longitude', units = 'degrees_east') null_cube = Cube(np.zeros((ndays*24, 180, 360), np.float32), dim_coords_and_dims=[(time, 0), (latitude, 1), (longitude, 2)]) print('Check null_cube for new grid') # pdb.set_trace() ## START OF LSM************************************************ # # read in land_sea_mask # variable = "land_sea_mask" # lsm_cube = iris.load(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable))) # #pdb.set_trace() # # convert from list to cube # lsm_cube = lsm_cube[0]# # ## REgrid to 1by1 degree - cash the source, template, gridding type for later use - faster # regridder = iris.analysis.Linear().regridder(lsm_cube, null_cube) # lsm_cube_1by1 = regridder(lsm_cube) # print('Check lsm_cube_1by1 for new grid') ## pdb.set_trace()# # # # remove old cube # lsm_cube = 0 # # lsm_cube_1by1 = lsm_cube_1by1[0,:,:] ## lsm_cube_1by1_field = lsm_cube_1by1.extract(iris.Constraint(time=0)) # lsm_cube_1by1.units = "1" # print(lsm_cube_1by1) # print('Check lsm_cube_1by1 for 2m_temperature') # #pdb.set_trace() # ## output # iris.save(lsm_cube_1by1, os.path.join(DataLoc, "{}{:02d}_{}.nc".format(year, month, variable)), zlib=True) # print('Check lsm_cube_1by1 output') # pdb.set_trace() ## END OF LSM************************************************************ # read in t, td and sp (may be VERY LARGE variable = "2m_temperature" t_cube = iris.load(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable))) #pdb.set_trace() # convert from list to cube t_cube = t_cube[0] # REgrid to 1by1 degree - cash the source, 
template, gridding type for later use - faster regridder = iris.analysis.Linear().regridder(t_cube, null_cube) t_cube_1by1 = regridder(t_cube) print('Check t_cube_1by1 for new grid') # pdb.set_trace() # remove old cube t_cube = 0 t_cube_1by1.data -= 273.15 # convert to C t_cube_1by1.units = "degreesC" print('Check t_cube_1by1 for 2m_temperature') #pdb.set_trace() variable = "2m_dewpoint_temperature" td_cube = iris.load(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable))) # convert from list to cube td_cube = td_cube[0] # REgrid to 1by1 degree - cash the source, template, gridding type for later use - faster td_cube_1by1 = regridder(td_cube) print('Check td_cube_1by1 for new grid') # pdb.set_trace() # remove old cube td_cube = 0 td_cube_1by1.data -= 273.15 # convert to C td_cube_1by1.units = "degreesC" print('Check td_cube_1by1 for 2m_dewpoint_temperature') # pdb.set_trace() variable = "surface_pressure" p_cube = iris.load(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable))) # convert from list to cube p_cube = p_cube[0] # REgrid to 1by1 degree - cash the source, template, gridding type for later use - faster p_cube_1by1 = regridder(p_cube) print('Check p_cube_1by1 for new grid') # pdb.set_trace() # remove old cube p_cube = 0 p_cube_1by1.data /= 100. # convert to C p_cube_1by1.units = "hPa" print('Check p_cube_1by1 for surface_pressure') # pdb.set_trace() # # if it contains 2 cubes where we have downloaded mask and wish to mask to land or sea.... # if len(p_cubelist) == 2: # # extract both cubes # pcube1 = p_cubelist[0] # pcube2 = p_cubelist[1]# # # masked1, = np.where(pcube1.data.mask[:, 0, 0] == True) # masked2, = np.where(pcube2.data.mask[:, 0, 0] == True) # # # use locations of masks to overwrite # tp_cube = pcube1[:] # tp_cube.data[masked1] = pcube2.data[masked1] # tp_cube.var_name = "tp" # # # else it's just a single cube, so easier to deal with # elif len(p_cubelist) == 1:# # # tp_cube = p_cubelist[0] # tp_cube.var_name = "tp" # No masking internally within this code... # Process q # Copy the t_cube and then change some of the fields? variable = 'specific_humidity' q_cube = t_cube_1by1.copy() q_cube.fill_value = MDI # not sure whether we're doing -999 yet if saving as integer q_cube.units = cf_units.Unit("g kg-2") q_cube.var_name = "q2m" q_cube.long_name = "2 metre specific humidity" # Populate the q data q_cube.data = ch.sh(td_cube_1by1.data,t_cube_1by1.data,p_cube_1by1.data,roundit=False) print('Check q_cube for new data') # pdb.set_trace() ## mask all regions which are 100% ocean #cube.data[lsm.data == 0] = utils.MDI #cube.data = np.ma.masked_where(lsm.data == 0, cube.data) #cube.data.fill_value = utils.MDI # Aggregate to daily # add a "day" indicator to allow aggregation iris.coord_categorisation.add_day_of_month(q_cube, "time", name="day_of_month") q_cube_day = q_cube.aggregated_by(["day_of_month"], iris.analysis.MEAN) q_cube = 0 q_cube_day.remove_coord("day_of_month") q_cube_day.units = cf_units.Unit("g kg-2") print('Check q_cube for daily averages') # pdb.set_trace() # output iris.save(q_cube_day, os.path.join(DataLoc, "{}{:02d}_daily_{}.nc".format(year, month, variable)), zlib=True) q_cube_day=0 print('Check q_cube_1by1 output') # pdb.set_trace() # Process RH # Copy the t_cube and then change some of the fields? 
variable = 'relative_humidity' rh_cube = t_cube_1by1.copy() rh_cube.fill_value = MDI # not sure whether we're doing -999 yet if saving as integer rh_cube.units = cf_units.Unit("%") rh_cube.var_name = "rh2m" rh_cube.long_name = "2 metre relative humidity" # Populate the q data rh_cube.data = ch.rh(td_cube_1by1.data,t_cube_1by1.data,p_cube_1by1.data,roundit=False) print('Check rh_cube for new data') # pdb.set_trace() ## mask all regions which are 100% ocean #cube.data[lsm.data == 0] = utils.MDI #cube.data = np.ma.masked_where(lsm.data == 0, cube.data) #cube.data.fill_value = utils.MDI # Aggregate to daily # add a "day" indicator to allow aggregation iris.coord_categorisation.add_day_of_month(rh_cube, "time", name="day_of_month") rh_cube_day = rh_cube.aggregated_by(["day_of_month"], iris.analysis.MEAN) rh_cube = 0 rh_cube_day.remove_coord("day_of_month") rh_cube_day.units = cf_units.Unit("%") print('Check rh_cube for daily averages') # pdb.set_trace() # output iris.save(rh_cube_day, os.path.join(DataLoc, "{}{:02d}_daily_{}.nc".format(year, month, variable)), zlib=True) rh_cube_day=0 print('Check rh_cube_1by1 output') #pdb.set_trace() # Process e # Copy the t_cube and then change some of the fields? variable = 'vapour_pressure' e_cube = t_cube_1by1.copy() e_cube.fill_value = MDI # not sure whether we're doing -999 yet if saving as integer e_cube.units = cf_units.Unit("hPa") e_cube.var_name = "e2m" e_cube.long_name = "2 metre vapour pressure" # Populate the q data e_cube.data = ch.vap(td_cube_1by1.data,t_cube_1by1.data,p_cube_1by1.data,roundit=False) print('Check e_cube for new data') # pdb.set_trace() ## mask all regions which are 100% ocean #cube.data[lsm.data == 0] = utils.MDI #cube.data = np.ma.masked_where(lsm.data == 0, cube.data) #cube.data.fill_value = utils.MDI # Aggregate to daily # add a "day" indicator to allow aggregation iris.coord_categorisation.add_day_of_month(e_cube, "time", name="day_of_month") e_cube_day = e_cube.aggregated_by(["day_of_month"], iris.analysis.MEAN) e_cube
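# -----------------------------------------------------------------------------
# Standalone sketch of the hourly-to-daily aggregation step that convert()
# above applies to each humidity cube (add_day_of_month + aggregated_by MEAN),
# run on a tiny synthetic 48-hour, single-gridbox cube.  The dates, grid and
# values are placeholders, not the ERA5 data handled by the real script.
import numpy as np
import iris
import iris.analysis
import iris.coord_categorisation
from iris.coords import DimCoord
from iris.cube import Cube
import cf_units

time_coord = DimCoord(np.arange(48, dtype=np.float64),
                      standard_name='time',
                      units=cf_units.Unit('hours since 2020-01-01 00:00:00',
                                          calendar='standard'))
lat = DimCoord([0.5], standard_name='latitude', units='degrees_north')
lon = DimCoord([0.5], standard_name='longitude', units='degrees_east')
hourly = Cube(np.arange(48, dtype=np.float32).reshape(48, 1, 1),
              dim_coords_and_dims=[(time_coord, 0), (lat, 1), (lon, 2)])

# Tag every timestep with its day of month, then average within each day
iris.coord_categorisation.add_day_of_month(hourly, 'time', name='day_of_month')
daily = hourly.aggregated_by(['day_of_month'], iris.analysis.MEAN)
daily.remove_coord('day_of_month')
print(daily.shape)  # -> (2, 1, 1): one field per day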
import os import logging import numpy as np from typing import Optional import torch from torch.utils.data import DataLoader from ..eval import Metric from .dataset import CHMMBaseDataset from .dataset import collate_fn as default_collate_fn logger = logging.getLogger(__name__) OUT_RECALL = 0.9 OUT_PRECISION = 0.8 class CHMMBaseTrainer: def __init__(self, config, collate_fn=None, training_dataset=None, valid_dataset=None, test_dataset=None, pretrain_optimizer=None, optimizer=None): self._model = None self._config = config self._training_dataset = training_dataset self._valid_dataset = valid_dataset self._test_dataset = test_dataset self._collate_fn = collate_fn self._pretrain_optimizer = pretrain_optimizer self._optimizer = optimizer self._init_state_prior = None self._init_trans_mat = None self._init_emiss_mat = None @property def config(self): return self._config @config.setter def config(self, x): logger.warning("Updating DirCHMMTrainer.config") self._config = x @property def model(self): return self._model def initialize_trainer(self): """ Initialize necessary components for training Note: Better not change the order Returns ------- the initialized trainer """ self.initialize_matrices() self.initialize_model() self.initialize_optimizers() return self def initialize_model(self): raise NotImplementedError def initialize_matrices(self): """ Initialize <HMM> transition and emission matrices Returns ------- self """ assert self._training_dataset and self._valid_dataset # inject prior knowledge about transition and emission self._init_state_prior = torch.zeros(self._config.d_hidden, device=self._config.device) + 1e-2 self._init_state_prior[0] += 1 - self._init_state_prior.sum() intg_obs = list(map(np.array, self._training_dataset.obs + self._valid_dataset.obs)) # construct/load initial transition matrix dataset_dir = os.path.split(self._config.train_path)[0] transmat_path = os.path.join(dataset_dir, "init_transmat.pt") if getattr(self._config, "load_init_mat", False): if os.path.isfile(transmat_path): logger.info("Loading initial transition matrix from disk") self._init_trans_mat = torch.load(transmat_path) # if the loaded transmat does not have the proper shape, re-calculate it. s0_transmat, s1_transmat = self._init_trans_mat.shape if not (s0_transmat == s1_transmat == self.config.d_obs): self._init_trans_mat = None if self._init_trans_mat is None: self._init_trans_mat = torch.tensor(initialise_transmat( observations=intg_obs, label_set=self._config.bio_label_types )[0], dtype=torch.float) if getattr(self._config, "save_init_mat", False): logger.info("Saving initial transition matrix") torch.save(self._init_trans_mat, transmat_path) # construct/load initial emission matrix emissmat_path = os.path.join(dataset_dir, "init_emissmat.pt") if getattr(self._config, "load_init_mat", False): if os.path.isfile(emissmat_path): logger.info("Loading initial emission matrix from disk") self._init_emiss_mat = torch.load(emissmat_path) # if the loaded emissmat does not have the proper shape, re-calculate it. 
s0_emissmat, s1_emissmat, s2_emissmat = self._init_emiss_mat.shape if not (s0_emissmat == self.config.n_src) and (s1_emissmat == s2_emissmat == self.config.d_obs): self._init_emiss_mat = None if self._init_emiss_mat is None: self._init_emiss_mat = torch.tensor(initialise_emissions( observations=intg_obs, label_set=self._config.bio_label_types, sources=self._config.sources, src_priors=self._config.src_priors )[0], dtype=torch.float) if getattr(self._config, "save_init_mat", False): logger.info("Saving initial emission matrix") torch.save(self._init_emiss_mat, emissmat_path) return self def initialize_optimizers(self, optimizer=None, pretrain_optimizer=None): self._optimizer = self.get_optimizer() if optimizer is None else optimizer self._pretrain_optimizer = self.get_pretrain_optimizer() if pretrain_optimizer is None else pretrain_optimizer def get_dataloader(self, dataset, shuffle=False): if dataset is not None: dataloader = DataLoader( dataset=dataset, batch_size=self._config.lm_batch_size, collate_fn=self._collate_fn if self._collate_fn is not None else default_collate_fn, shuffle=shuffle, drop_last=False ) return dataloader else: logger.error('Dataset is not defined') raise ValueError("Dataset is not defined!") def pretrain_step(self, data_loader, optimizer, trans_, emiss_): raise NotImplementedError def training_step(self, data_loader, optimizer): raise NotImplementedError def train(self): raise NotImplementedError def valid(self) -> Metric: self._model.to(self._config.device) valid_metrics = self.evaluate(self._valid_dataset) logger.info("Validation results:") for k, v in valid_metrics.items(): logger.info(f" {k}: {v:.4f}") return valid_metrics def test(self) -> Metric: self._model.to(self._config.device) test_metrics = self.evaluate(self._test_dataset) logger.info("Test results:") for k, v in test_metrics.items(): logger.info(f" {k}: {v:.4f}") return test_metrics def evaluate(self, dataset: CHMMBaseDataset): raise NotImplementedError def predict(self, dataset: CHMMBaseDataset): raise NotImplementedError def get_pretrain_optimizer(self): raise NotImplementedError def get_optimizer(self): # ----- initialize optimizer ----- raise NotImplementedError def save(self, output_dir: Optional[str] = None, save_optimizer: Optional[bool] = False, model_name: Optional[str] = 'chmm', optimizer_name: Optional[str] = 'chmm-optimizer', pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'): """ Save model parameters as well as trainer parameters Parameters ---------- output_dir: model directory save_optimizer: whether to save optimizer model_name: model name (suffix free) optimizer_name: optimizer name (suffix free) pretrain_optimizer_name: pretrain optimizer name (suffix free) Returns ------- None """ output_dir = output_dir if output_dir is not None else self._config.output_dir logger.info(f"Saving model to {output_dir}") model_state_dict = self._model.state_dict() torch.save(model_state_dict, os.path.join(output_dir, f'{model_name}.bin')) self._config.save(output_dir) if save_optimizer: logger.info("Saving optimizer and scheduler") torch.save(self._optimizer.state_dict(), os.path.join(output_dir, f"{optimizer_name}.bin")) torch.save(self._pretrain_optimizer.state_dict(), os.path.join(output_dir, f"{pretrain_optimizer_name}.bin")) return None def load(self, input_dir: Optional[str] = None, load_optimizer: Optional[bool] = False, model_name: Optional[str] = 'chmm', optimizer_name: Optional[str] = 'chmm-optimizer', pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'): """ 
Load model parameters. Parameters ---------- input_dir: model directory load_optimizer: whether load other trainer parameters model_name: model name (suffix free) optimizer_name: optimizer name (suffix free) pretrain_optimizer_name: pretrain optimizer name (suffix free) Returns ------- self """ input_dir = input_dir if input_dir is not None else self._config.output_dir if self._model is not None: logger.warning(f"The original model {type(self._model)} in {type(self)} is not None. " f"It will be overwritten by the loaded model!") logger.info(f"Loading model from {input_dir}") self.initialize_model() self._model.load_state_dict(torch.load(os.path.join(input_dir, f'{model_name}.bin'))) self._model.to(self.config.device) if load_optimizer: logger.info("Loading optimizer and scheduler") if self._optimizer is None: self.initialize_optimizers() if os.path.isfile(os.path.join(input_dir, f"{optimizer_name}.bin")): self._optimizer.load_state_dict( torch.load(os.path.join(input_dir, f"{optimizer_name}.bin"), map_location=self.config.device) ) else: logger.warning("Optimizer file does not exist!") if os.path.isfile(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin")): self._pretrain_optimizer.load_state_dict( torch.load(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin")) ) else: logger.warning("Pretrain optimizer file does not exist!") return self def save_results(self, output_dir: str, valid_results: Optional[Metric] = None, file_name: Optional[str] = 'results', disable_final_valid: Optional[bool] = False, disable_test: Optional[bool] = False, disable_inter_results: Optional[bool] = False) -> None: """ Save training (validation) results Parameters ---------- output_dir: output directory, should be a folder valid_results: validation results during the training process file_name: file name disable_final_valid: disable final validation process (getting validation results of the trained model) disable_test: disable test process disable_inter_results: do not save inter-results Returns ------- None """ if not disable_final_valid: logger.info("Getting final validation metrics") valid_metrics = self.valid() else: valid_metrics = None if not disable_test: logger.info("Getting test metrics.") test_metrics = self.test() else: test_metrics = None # write validation and test results result_file = os.path.join(output_dir, f'{file_name}.txt') logger.info(f"Writing results to {result_file}") self.write_result(file_path=result_file, valid_results=valid_results, final_valid_metrics=valid_metrics, test_metrics=test_metrics) if not disable_inter_results: # save validation inter results logger.info(f"Saving inter results") inter_result_file = os.path.join(output_dir, f'{file_name}-inter.pt') torch.save(valid_results.__dict__, inter_result_file) return None @staticmethod def write_result(file_path: str, valid_results: Optional[Metric] = None, final_valid_metrics: Optional[Metric] = None, test_metrics: Optional[Metric] = None) -> None: """ Support functions for saving training results Parameters ---------- file_path: where to save results valid_results: validation results during the training process final_valid_metrics: validation results of the trained model test_metrics Returns ------- """ with open(file_path, 'w') as f: if valid_results is not None: for i in range(len(valid_results)): f.write(f"[Epoch {i + 1}]\n") for k in ['precision', 'recall', 'f1']: f.write(f" {k}: {valid_results[k][i]:.4f}") f.write("\n") if final_valid_metrics is not None: f.write(f"[Best Validation]\n") for k in ['precision', 
'recall', 'f1']: f.write(f" {k}: {final_valid_metrics[k]:.4f}") f.write("\n") if test_metrics is not None: f.write(f"[Test]\n") for k in ['precision', 'recall', 'f1']: f.write(f" {k}: {test_metrics[k]:.4f}") f.write("\n") return None def initialise_startprob(observations, label_set, src_idx=None): """ calculate initial hidden states (not used in our setup since our sequences all begin from [CLS], which corresponds to hidden state "O". :param src_idx: source index :param label_set: a set of all possible label_set :param observations: n_instances X seq_len X n_src X d_obs :return: probabilities for the initial hidden states """ n_src = observations[0].shape[1] logger.info("Constructing start distribution prior...") init_counts = np.zeros((len(label_set),)) if src_idx is not None: for obs in observations: init_counts[obs[0, src_idx].argmax()] += 1 else: for obs in observations: for z in range(n_src): init_counts[obs[0, z].argmax()] += 1 for i, label in enumerate(label_set): if i == 0 or label.startswith("B-"): init_counts[i] += 1 startprob_prior = init_counts + 1 startprob_ = np.random.dirichlet(init_counts + 1E-10) return startprob_, startprob_prior # TODO: try to use a more reliable source to start the transition and emission def initialise_transmat(observations, label_set, src_idx=None): """ initialize transition matrix :param src_idx: the index of the source of which the transition statistics is computed. If None, use all sources :param label_set: a set of all possible label_set :param observations: n_instances X seq_len X n_src X d_obs :return: initial transition matrix and transition counts """ logger.info("Constructing transition matrix prior...") n_src = observations[0].shape[1] trans_counts = np.zeros((len(label_set), len(label_set))) if src_idx is not None: for obs in observations: for k in range(0, len(obs) - 1): trans_counts[obs[k, src_idx].argmax(), obs[k + 1, src_idx].argmax()] += 1 else: for obs in observations: for k in range(0, len(obs) - 1): for z in range(n_src): trans_counts[obs[k, z].argmax(), obs[k + 1, z].argmax()] += 1 # update transition matrix with prior knowledge for i, label in enumerate(label_set): if label.startswith("B-") or label.startswith("I-"): trans_counts[i, label_set.index("I-" + label[2:])] += 1 elif i == 0 or label.startswith("I-"): for j, label2 in enumerate(label_set): if j == 0 or label2.startswith("B-"): trans_counts[i, j] += 1 transmat_prior = trans_counts + 1 # initialize transition matrix with dirichlet distribution transmat_ = np.vstack([np.random.dirichlet(trans_counts2 + 1E-10) for trans_counts2 in trans_counts]) return transmat_, transmat_prior def initialise_emissions(observations, label_set, sources, src_priors, strength=1000): """ initialize emission matrices :param sources: source names :param src_priors: source priors :param label_set: a set of all
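# -----------------------------------------------------------------------------
# Small numpy-only sketch of the Dirichlet initialisation performed by
# initialise_transmat() above: accumulate transition counts, nudge them with a
# slightly simplified version of its BIO prior (B-X / I-X may continue into
# I-X; "O" may move to "O" or any B-X), then draw each row of the transition
# matrix from a Dirichlet over its counts.  The three-label set and the uniform
# starting counts are toy values.
import numpy as np

label_set = ['O', 'B-PER', 'I-PER']

# In the real code these counts come from argmax-ed weak-supervision observations.
trans_counts = np.ones((len(label_set), len(label_set)))

for i, label in enumerate(label_set):
    if label.startswith('B-') or label.startswith('I-'):
        trans_counts[i, label_set.index('I-' + label[2:])] += 1
    elif i == 0:
        for j, other in enumerate(label_set):
            if j == 0 or other.startswith('B-'):
                trans_counts[i, j] += 1

transmat_prior = trans_counts + 1
transmat = np.vstack([np.random.dirichlet(row + 1e-10) for row in trans_counts])
print(np.allclose(transmat.sum(axis=1), 1.0))  # each row is a valid distribution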
'Longreach'}, '617750002':{'en': 'Lynd Range'}, '617750003':{'en': 'Macalister'}, '617750004':{'en': 'Maranoa'}, '617750005':{'en': 'Meandarra'}, '617750006':{'en': 'Miamba'}, '617750007':{'en': 'Miles'}, '617750008':{'en': 'Millmerran'}, '617750009':{'en': 'Mitchell'}, '617750010':{'en': 'Moonie'}, '617750011':{'en': 'Morven'}, '617750012':{'en': '<NAME>'}, '617750013':{'en': 'Muttaburra'}, '617750014':{'en': 'Nobby'}, '617750015':{'en': 'North Star'}, '617750016':{'en': 'Oakey'}, '617750017':{'en': 'Omanama'}, '617750018':{'en': 'Paroo'}, '617750019':{'en': 'Pikedale'}, '617750020':{'en': 'Pittsworth'}, '617750021':{'en': 'Quilpie'}, '617750022':{'en': 'Ravensbourne'}, '617750023':{'en': '<NAME>'}, '617750024':{'en': 'Roma'}, '617750025':{'en': 'Southwood'}, '617750026':{'en': '<NAME>'}, '617750027':{'en': 'Stanthorpe'}, '617750028':{'en': 'Surat'}, '617750029':{'en': 'Tabers'}, '617750030':{'en': 'Talwood'}, '617750031':{'en': 'Tambo'}, '617750032':{'en': 'Tara'}, '617750033':{'en': 'Taroom'}, '617750034':{'en': 'Teelba'}, '617750035':{'en': 'Texas'}, '617750036':{'en': 'Thallon'}, '617750037':{'en': 'Thargomindah'}, '617750038':{'en': '<NAME>'}, '617750039':{'en': 'Thomson'}, '617750040':{'en': 'Tipton'}, '617750041':{'en': 'Toobeah'}, '617750042':{'en': 'Toowoomba'}, '617750043':{'en': '<NAME>'}, '617750044':{'en': 'Wallumbilla'}, '617750045':{'en': 'Wandoan'}, '617750046':{'en': 'Warra'}, '617750047':{'en': 'Warrego'}, '617750048':{'en': 'Warwick'}, '617750049':{'en': 'Westgrove'}, '617750050':{'en': 'Westmar'}, '617750051':{'en': 'Winton'}, '617750052':{'en': 'Wyaga'}, '617750053':{'en': 'Yelarbon'}, '617750054':{'en': 'Yetman'}, '617750055':{'en': 'Yuleba'}, '617750056':{'en': 'Allora'}, '617750057':{'en': 'Aramac'}, '617750058':{'en': 'Arcadia Valley'}, '617750059':{'en': 'Atholwood'}, '617750100':{'en': 'Auburn'}, '617750101':{'en': 'Augathella'}, '617750102':{'en': 'Ballandean'}, '617750103':{'en': 'Balonne'}, '617750104':{'en': 'Barcaldine'}, '617750105':{'en': 'Barcoo'}, '617750106':{'en': 'Beebo'}, '617750107':{'en': 'Bell'}, '617750108':{'en': '<NAME>'}, '617750109':{'en': 'Bimbadeen'}, '617750110':{'en': 'Blackall'}, '617750111':{'en': 'Bollon'}, '617750112':{'en': 'Bonshaw'}, '617750113':{'en': 'Bowenville'}, '617750114':{'en': 'Brigalow'}, '617750115':{'en': 'Bringalily'}, '617750116':{'en': 'Brookstead'}, '617750117':{'en': 'Brymaroo'}, '617750118':{'en': 'Bunya Mountains'}, '617750119':{'en': 'Cambooya'}, '617750120':{'en': 'Cecil Plains'}, '617750121':{'en': 'Charleville'}, '617750122':{'en': 'Chinchilla'}, '617750123':{'en': 'Clifton'}, '617750124':{'en': 'Cockatoo'}, '617750125':{'en': 'Condamine'}, '617750126':{'en': 'Coondarra'}, '617750127':{'en': 'Cooyar'}, '617750128':{'en': 'Cottonvale'}, '617750129':{'en': 'Crows Nest'}, '617750130':{'en': 'Culgoa'}, '617750131':{'en': 'Cunnamulla'}, '617750132':{'en': 'Cunningham'}, '617750133':{'en': 'Dalby'}, '617750134':{'en': 'Darr Creek'}, '617750135':{'en': 'Diamantina'}, '617750136':{'en': 'Diamondy'}, '617750137':{'en': 'Dirranbandi'}, '617750138':{'en': 'Dulacca'}, '617750139':{'en': 'Dunmore'}, '617750140':{'en': 'Durham Downs'}, '617750141':{'en': 'Elbow Valley'}, '617750142':{'en': 'Eschol'}, '617750143':{'en': 'Freestone'}, '617750144':{'en': 'Galilee'}, '617750145':{'en': 'Glenhope'}, '617750146':{'en': 'Goombi'}, '617750147':{'en': 'Goombungee'}, '617750148':{'en': 'Goondiwindi'}, '617750149':{'en': 'Greenmount'}, '617750150':{'en': 'Guluguba'}, '617750151':{'en': 'Haddon'}, '617750152':{'en': 'Haden'}, 
'617750153':{'en': 'Helidon'}, '617750154':{'en': 'Inglewood'}, '617750155':{'en': 'Injune'}, '617750156':{'en': 'Isisford'}, '617750157':{'en': 'Jandowae'}, '617750158':{'en': 'Jericho'}, '617750159':{'en': 'Jimbour'}, '617750160':{'en': 'Jondaryan'}, '617750161':{'en': 'Jundah'}, '617750162':{'en': 'Kilbeggan'}, '617750163':{'en': 'Killarney'}, '617750164':{'en': 'Kumbarilla'}, '617750165':{'en': 'Kupunn'}, '617750166':{'en': 'Legume'}, '617750167':{'en': 'Leyburn'}, '617750168':{'en': 'Liston'}, '617750169':{'en': 'Longreach'}, '617750170':{'en': '<NAME>'}, '617750171':{'en': 'Macalister'}, '617750172':{'en': 'Maranoa'}, '617750173':{'en': 'Meandarra'}, '617750174':{'en': 'Miamba'}, '617750175':{'en': 'Miles'}, '617750176':{'en': 'Millmerran'}, '617750177':{'en': 'Mitchell'}, '617750178':{'en': 'Moonie'}, '617750179':{'en': 'Morven'}, '617750180':{'en': '<NAME>'}, '617750181':{'en': 'Muttaburra'}, '617750182':{'en': 'Nobby'}, '617750183':{'en': 'North Star'}, '617750184':{'en': 'Oakey'}, '617750185':{'en': 'Omanama'}, '617750186':{'en': 'Paroo'}, '617750187':{'en': 'Pikedale'}, '617750188':{'en': 'Pittsworth'}, '617750189':{'en': 'Quilpie'}, '617750190':{'en': 'Ravensbourne'}, '617750191':{'en': '<NAME>'}, '617750192':{'en': 'Roma'}, '617750193':{'en': 'Southwood'}, '617750194':{'en': 'St George'}, '617750195':{'en': 'Stanthorpe'}, '617750196':{'en': 'Surat'}, '617750197':{'en': 'Tabers'}, '617750198':{'en': 'Talwood'}, '617750199':{'en': 'Tambo'}, '617750200':{'en': 'Tara'}, '617750201':{'en': 'Taroom'}, '617750202':{'en': 'Teelba'}, '617750203':{'en': 'Texas'}, '617750204':{'en': 'Thallon'}, '617750205':{'en': 'Thargomindah'}, '617750206':{'en': 'The Gums'}, '617750207':{'en': 'Thomson'}, '617750208':{'en': 'Tipton'}, '617750209':{'en': 'Toobeah'}, '617750210':{'en': 'Toowoomba'}, '617750211':{'en': '<NAME>'}, '617750212':{'en': 'Wallumbilla'}, '617750213':{'en': 'Wandoan'}, '617750214':{'en': 'Warra'}, '617750215':{'en': 'Warrego'}, '617750216':{'en': 'Warwick'}, '617750217':{'en': 'Westgrove'}, '617750218':{'en': 'Westmar'}, '617750219':{'en': 'Winton'}, '617750220':{'en': 'Wyaga'}, '617750221':{'en': 'Yelarbon'}, '617750222':{'en': 'Yetman'}, '617750223':{'en': 'Yuleba'}, '617750224':{'en': 'Allora'}, '617750225':{'en': 'Aramac'}, '617750226':{'en': 'Arcadia Valley'}, '617750227':{'en': 'Atholwood'}, '617750228':{'en': 'Auburn'}, '617750229':{'en': 'Augathella'}, '61775023':{'en': 'Cambooya'}, '61775024':{'en': 'Mitchell'}, '617750300':{'en': 'Ballandean'}, '617750301':{'en': 'Balonne'}, '617750302':{'en': 'Barcaldine'}, '617750303':{'en': 'Barcoo'}, '617750304':{'en': 'Beebo'}, '617750305':{'en': 'Bell'}, '617750306':{'en': 'Billa Billa'}, '617750307':{'en': 'Bimbadeen'}, '617750308':{'en': 'Blackall'}, '617750309':{'en': 'Bollon'}, '617750310':{'en': 'Bonshaw'}, '617750311':{'en': 'Bowenville'}, '617750312':{'en': 'Brigalow'}, '617750313':{'en': 'Bringalily'}, '617750314':{'en': 'Brookstead'}, '617750315':{'en': 'Brymaroo'}, '617750316':{'en': 'Bunya Mountains'}, '617750317':{'en': 'Cambooya'}, '617750318':{'en': 'Cecil Plains'}, '617750319':{'en': 'Charleville'}, '617750320':{'en': 'Chinchilla'}, '617750321':{'en': 'Clifton'}, '617750322':{'en': 'Cockatoo'}, '617750323':{'en': 'Condamine'}, '617750324':{'en': 'Coondarra'}, '617750325':{'en': 'Cooyar'}, '617750326':{'en': 'Cottonvale'}, '617750327':{'en': 'Crows Nest'}, '617750328':{'en': 'Culgoa'}, '617750329':{'en': 'Cunnamulla'}, '617750330':{'en': 'Cunningham'}, '617750331':{'en': 'Dalby'}, '617750332':{'en': 'Darr Creek'}, 
'617750333':{'en': 'Diamantina'}, '617750334':{'en': 'Diamondy'}, '617750335':{'en': 'Dirranbandi'}, '617750336':{'en': 'Dulacca'}, '617750337':{'en': 'Dunmore'}, '617750338':{'en': 'Durham Downs'}, '617750339':{'en': 'Elbow Valley'}, '617750340':{'en': 'Eschol'}, '617750341':{'en': 'Freestone'}, '617750342':{'en': 'Galilee'}, '617750343':{'en': 'Glenhope'}, '617750344':{'en': 'Goombi'}, '617750345':{'en': 'Goombungee'}, '617750346':{'en': 'Goondiwindi'}, '617750347':{'en': 'Greenmount'}, '617750348':{'en': 'Guluguba'}, '617750349':{'en': 'Haddon'}, '617750350':{'en': 'Haden'}, '617750351':{'en': 'Helidon'}, '617750352':{'en': 'Inglewood'}, '617750353':{'en': 'Injune'}, '617750354':{'en': 'Isisford'}, '617750355':{'en': 'Jandowae'}, '617750356':{'en': 'Jericho'}, '617750357':{'en': 'Jimbour'}, '617750358':{'en': 'Jondaryan'}, '617750359':{'en': 'Jundah'}, '617750360':{'en': 'Kilbeggan'}, '617750361':{'en': 'Killarney'}, '617750362':{'en': 'Kumbarilla'}, '617750363':{'en': 'Kupunn'}, '617750364':{'en': 'Legume'}, '617750365':{'en': 'Leyburn'}, '617750366':{'en': 'Liston'}, '617750367':{'en': 'Longreach'}, '617750368':{'en': '<NAME>'}, '617750369':{'en': 'Macalister'}, '617750370':{'en': 'Maranoa'}, '617750371':{'en': 'Meandarra'}, '617750372':{'en': 'Miamba'}, '617750373':{'en': 'Miles'}, '617750374':{'en': 'Millmerran'}, '617750375':{'en': 'Mitchell'}, '617750376':{'en': 'Moonie'}, '617750377':{'en': 'Morven'}, '617750378':{'en': '<NAME>'}, '617750379':{'en': 'Muttaburra'}, '617750380':{'en': 'Nobby'}, '617750381':{'en': 'North Star'}, '617750382':{'en': 'Oakey'}, '617750383':{'en': 'Omanama'}, '617750384':{'en': 'Paroo'}, '617750385':{'en': 'Pikedale'}, '617750386':{'en': 'Pittsworth'}, '617750387':{'en': 'Quilpie'}, '617750388':{'en': 'Ravensbourne'}, '617750389':{'en': '<NAME>'}, '617750390':{'en': 'Roma'}, '617750391':{'en': 'Southwood'}, '617750392':{'en': '<NAME>'}, '617750393':{'en': 'Stanthorpe'}, '617750394':{'en': 'Surat'}, '617750395':{'en': 'Tabers'}, '617750396':{'en': 'Talwood'}, '617750397':{'en': 'Tambo'}, '617750398':{'en': 'Tara'}, '617750399':{'en': 'Taroom'}, '617750400':{'en': 'Teelba'}, '617750401':{'en': 'Texas'}, '617750402':{'en': 'Thallon'}, '617750403':{'en': 'Thargomindah'}, '617750404':{'en': '<NAME>'}, '617750405':{'en': 'Thomson'}, '617750406':{'en': 'Tipton'}, '617750407':{'en': 'Toobeah'}, '617750408':{'en': 'Toowoomba'}, '617750409':{'en': '<NAME>'}, '617750410':{'en': 'Wallumbilla'}, '617750411':{'en': 'Wandoan'}, '617750412':{'en': 'Warra'}, '617750413':{'en': 'Warrego'}, '617750414':{'en': 'Warwick'}, '617750415':{'en': 'Westgrove'}, '617750416':{'en': 'Westmar'}, '617750417':{'en': 'Winton'}, '617750418':{'en': 'Wyaga'}, '617750419':{'en': 'Yelarbon'}, '617750420':{'en': 'Yetman'}, '617750421':{'en': 'Yuleba'}, '61775049':{'en': 'Bonshaw'}, '61775060':{'en': 'Bunya Mountains'}, '61775079':{'en': '<NAME>'}, '61775090':{'en': 'Inglewood'}, '61775109':{'en': 'Southwood'}, '61775120':{'en': 'Tara'}, '61775139':{'en': 'Haddon'}, '61775142':{'en': 'Ravensbourne'}, '61775143':{'en': 'Valley Downs'}, '61775144':{'en': 'Toowoomba'}, '61851000':{'en': 'Broome'}, '61851001':{'en': 'Christmas Island'}, '61851002':{'en': 'Cocos Island'}, '61851003':{'en': 'Dampier'}, '61851004':{'en': '<NAME>'}, '61851005':{'en': 'Derby'}, '61851006':{'en': '<NAME>'}, '61851007':{'en': '<NAME>'}, '61851008':{'en': 'Hall\'s Creek'}, '61851009':{'en': 'Karratha'}, '61851010':{'en': 'Kununurra'}, '61851011':{'en': 'Leopold'}, '61851012':{'en': '<NAME>'}, '61851013':{'en': 
'Millstream'}, '61851014':{'en': 'Mitchell'}, '61851015':{'en': '<NAME>'}, '61851016':{'en': 'Newman'}, '61851017':{'en': 'Onslow'}, '61851018':{'en': 'Ord'}, '61851019':{'en': 'Pannawonica'}, '61851020':{'en': 'Paraburdoo'}, '61851021':{'en': 'Port Hedland'}, '61851022':{'en': 'Roebuck'}, '61851023':{'en': 'Sandfire'}, '61851024':{'en': 'Telfer'}, '61851025':{'en': '<NAME>'}, '61851026':{'en': 'Whaleback'}, '61851027':{'en': 'Wittenoom'}, '61851028':{'en': 'Wyndham'}, '61851029':{'en': 'Broome'}, '61851030':{'en': 'Christmas Island'}, '61851031':{'en': 'Cocos Island'}, '61851032':{'en': 'Dampier'}, '61851033':{'en': '<NAME>'}, '61851034':{'en': 'Derby'}, '61851035':{'en': '<NAME>'}, '61851036':{'en': '<NAME>'}, '61851037':{'en': 'Hall\'s Creek'}, '61851038':{'en': 'Karratha'}, '61851039':{'en': 'Kununurra'}, '61851040':{'en': 'Leopold'}, '61851041':{'en': '<NAME>'}, '61851042':{'en': 'Millstream'}, '61851043':{'en': 'Mitchell'}, '61851044':{'en': 'Mount Bruce'}, '61851045':{'en': 'Newman'}, '61851046':{'en': 'Onslow'}, '61851047':{'en': 'Ord'}, '61851048':{'en': 'Pannawonica'}, '61851049':{'en': 'Paraburdoo'}, '61851050':{'en': 'Port Hedland'}, '61851051':{'en': 'Roebuck'}, '61851052':{'en': 'Sandfire'}, '61851053':{'en': 'Telfer'}, '61851054':{'en': '<NAME>'}, '61851055':{'en': 'Whaleback'}, '61851056':{'en': 'Wittenoom'}, '61851057':{'en': 'Wyndham'}, '61851058':{'en': 'Broome'}, '61851059':{'en': 'Christmas Island'}, '61851060':{'en': 'Cocos Island'}, '61851061':{'en': 'Dampier'}, '61851062':{'en': '<NAME>'}, '61851063':{'en': 'Derby'}, '61851064':{'en': '<NAME>'}, '61851065':{'en': '<NAME>'}, '61851066':{'en': 'Hall\'s Creek'}, '61851067':{'en': 'Karratha'}, '61851068':{'en': 'Kununurra'}, '61851069':{'en': 'Leopold'}, '61851070':{'en': '<NAME>'}, '61851071':{'en': 'Millstream'}, '61851072':{'en': 'Mitchell'}, '61851073':{'en': 'Mount Bruce'}, '61851074':{'en': 'Newman'}, '61851075':{'en': 'Onslow'}, '61851076':{'en': 'Ord'}, '61851077':{'en': 'Pannawonica'}, '61851078':{'en': 'Paraburdoo'}, '61851079':{'en': 'Port Hedland'}, '61851080':{'en': 'Roebuck'}, '61851081':{'en': 'Sandfire'}, '61851082':{'en': 'Telfer'}, '61851083':{'en': '<NAME>'}, '61851084':{'en': 'Whaleback'}, '61851085':{'en': 'Wittenoom'}, '61851086':{'en': 'Wyndham'}, '61851087':{'en': 'Broome'}, '61851088':{'en': 'Christmas Island'}, '61851089':{'en': 'Cocos Island'}, '61851090':{'en': 'Dampier'}, '61851091':{'en': '<NAME>'}, '61851092':{'en': 'Derby'}, '61851093':{'en': '<NAME>'}, '61851094':{'en': '<NAME>'}, '61851095':{'en': 'Hall\'s Creek'}, '61851096':{'en': 'Karratha'}, '61851097':{'en': 'Kununurra'}, '61851098':{'en': 'Leopold'}, '61851099':{'en': '<NAME>'}, '61851100':{'en': 'Millstream'}, '61851101':{'en': 'Mitchell'}, '61851102':{'en': 'Mount Bruce'}, '61851103':{'en': 'Newman'}, '61851104':{'en': 'Onslow'}, '61851105':{'en': 'Ord'}, '61851106':{'en': 'Pannawonica'}, '61851107':{'en': 'Paraburdoo'}, '61851108':{'en': 'Port Hedland'}, '61851109':{'en': 'Roebuck'}, '61851110':{'en': 'Sandfire'}, '61851111':{'en': 'Telfer'}, '61851112':{'en': '<NAME>'}, '61851113':{'en': 'Whaleback'}, '61851114':{'en': 'Wittenoom'}, '61851115':{'en': 'Wyndham'}, '61851116':{'en': 'Broome'}, '61851117':{'en': 'Christmas Island'}, '61851118':{'en': 'Cocos Island'}, '61851119':{'en': 'Dampier'}, '61851120':{'en': '<NAME>'}, '61851121':{'en': 'Derby'}, '61851122':{'en': '<NAME>'}, '61851123':{'en': 'Great Sandy'}, '61851124':{'en': 'Hall\'s Creek'}, '61851125':{'en': 'Karratha'}, '61851126':{'en': 'Kununurra'}, '61851127':{'en': 
'Leopold'}, '61851128':{'en': '<NAME>'}, '61851129':{'en': 'Millstream'}, '61851130':{'en': 'Mitchell'}, '61851131':{'en': 'Mount Bruce'}, '61851132':{'en': 'Newman'}, '61851133':{'en': 'Onslow'}, '61851134':{'en': 'Ord'}, '61851135':{'en': 'Pannawonica'}, '61851136':{'en': 'Paraburdoo'}, '61851137':{'en': 'Port Hedland'}, '61851138':{'en': 'Roebuck'}, '61851139':{'en': 'Sandfire'}, '61851140':{'en': 'Telfer'}, '61851141':{'en': '<NAME>'}, '61851142':{'en': 'Whaleback'}, '61851143':{'en': 'Wittenoom'}, '61851144':{'en': 'Wyndham'}, '61851145':{'en': 'Broome'}, '61851146':{'en': 'Christmas Island'}, '61851148':{'en': 'Dampier'}, '61851149':{'en': '<NAME>'}, '61851150':{'en': 'Derby'}, '61851151':{'en': '<NAME>'}, '61851152':{'en': '<NAME>'}, '61851153':{'en': 'Hall\'s Creek'}, '61851154':{'en': 'Karratha'}, '61851155':{'en': 'Kununurra'}, '61851156':{'en': 'Leopold'}, '61851157':{'en': '<NAME>'}, '61851158':{'en': 'Millstream'}, '61851159':{'en': 'Mitchell'}, '61851160':{'en': '<NAME>'}, '61851161':{'en': 'Newman'}, '61851162':{'en': 'Onslow'}, '61851163':{'en': 'Ord'}, '61851164':{'en': 'Pannawonica'}, '61851165':{'en': 'Paraburdoo'}, '61851166':{'en': 'Port Hedland'}, '61851167':{'en': 'Roebuck'}, '61851168':{'en': 'Sandfire'}, '61851169':{'en': 'Telfer'}, '61851170':{'en': '<NAME>'}, '61851171':{'en': 'Whaleback'}, '61851172':{'en': 'Wittenoom'}, '61851173':{'en': 'Wyndham'}, '61851174':{'en': 'Broome'}, '61851175':{'en': 'Christmas Island'}, '61851176':{'en': 'Cocos Island'}, '61851177':{'en': 'Dampier'}, '61851178':{'en': '<NAME>'}, '61851179':{'en': 'Derby'}, '61851180':{'en': '<NAME>'}, '61851181':{'en': 'Great Sandy'}, '61851182':{'en': 'Hall\'s Creek'}, '61851183':{'en': 'Karratha'}, '61851184':{'en': 'Kununurra'}, '61851185':{'en': 'Leopold'}, '61851186':{'en': '<NAME>'}, '61851187':{'en': 'Millstream'}, '61851188':{'en': 'Mitchell'}, '61851189':{'en': 'Mount Bruce'}, '61851190':{'en': 'Newman'}, '61851191':{'en': 'Onslow'}, '61851192':{'en': 'Ord'}, '61851193':{'en': 'Pannawonica'}, '61851194':{'en': 'Paraburdoo'}, '61851195':{'en': 'Port Hedland'}, '61851196':{'en': 'Roebuck'}, '61851197':{'en': 'Sandfire'}, '61851198':{'en': 'Telfer'}, '61851199':{'en': '<NAME>'}, '61851200':{'en': 'Whaleback'}, '61851201':{'en': 'Wittenoom'}, '61851202':{'en': 'Wyndham'}, '61851203':{'en': 'Broome'}, '61851204':{'en': 'Christmas Island'}, '61851205':{'en': 'Port Hedland'}, '61851206':{'en': 'Dampier'}, '61851207':{'en': 'De Grey'}, '61851209':{'en': 'Mitchell'}, '61851211':{'en': 'Mount Bruce'}, '61851213':{'en': 'Newman'}, '61851215':{'en': 'Onslow'}, '61851217':{'en': 'Ord'}, '61851219':{'en': 'Pannawonica'}, '61851221':{'en': 'Paraburdoo'}, '61851222':{'en': 'Christmas Island'}, '61851223':{'en': 'Cocos Island'}, '61851224':{'en': 'Dampier'}, '61851225':{'en': 'De Grey'}, '61851226':{'en': 'Derby'}, '61851227':{'en': 'Fitzroy Crossing'}, '61851228':{'en': 'Great Sandy'}, '61851229':{'en': 'Hall\'s Creek'}, '61851230':{'en': 'Karratha'}, '61851231':{'en': 'Kununurra'}, '61851232':{'en': 'Port Hedland'}, '61851233':{'en': 'Port Hedland'}, '61851234':{'en': 'Broome'}, '61851235':{'en': 'Christmas Island'}, '61851236':{'en': 'Leopold'}, '61851237':{'en': '<NAME>'}, '61851238':{'en': 'Millstream'}, '61851239':{'en': 'Mitchell'}, '6185124':{'en': 'Karratha'}, '61851250':{'en': 'Newman'}, '61851251':{'en': 'Onslow'}, '61851252':{'en': 'Ord'}, '61851253':{'en': 'Pannawonica'}, '61851254':{'en': 'Paraburdoo'}, '61851255':{'en': 'Paraburdoo'}, '61851256':{'en': 'Paraburdoo'}, '61851257':{'en': 
'Paraburdoo'}, '61851258':{'en': 'Port Hedland'}, '61851259':{'en': 'Roebuck'}, '61851260':{'en': 'Sandfire'}, '61851261':{'en': 'Telfer'}, '61851262':{'en': '<NAME>'}, '61851263':{'en': 'Whaleback'}, '61851264':{'en': 'Wittenoom'}, '61851265':{'en': 'Wyndham'}, '61851267':{'en': 'Roebuck'}, '61851269':{'en': 'Sandfire'}, '6185127':{'en': 'Port Hedland'}, '61851281':{'en': 'Telfer'}, '61851283':{'en': '<NAME>'}, '61851285':{'en': 'Whaleback'}, '61851287':{'en': 'Wittenoom'}, '61851289':{'en': 'Wyndham'}, '61851290':{'en': 'Telfer'}, '61851291':{'en': '<NAME>'}, '61851292':{'en': 'Whaleback'}, '61851293':{'en': 'Wittenoom'}, '61851294':{'en': 'Wyndham'}, '61860000':{'en': 'Kalgoorlie'}, '61860001':{'en': 'Burracoppin'}, '61860002':{'en': 'Burracoppin'}, '61860003':{'en': 'Grass Patch'}, '61860004':{'en': 'Grass Patch'}, '61860005':{'en': 'Holleton'}, '61860006':{'en': 'Holleton'}, '61860007':{'en': 'Laverton'}, '61860008':{'en': 'Laverton'}, '61860009':{'en': 'Leinster'}, '61860010':{'en': 'Leinster'}, '61860011':{'en': 'Leonora'}, '61860012':{'en': 'Leonora'}, '61860013':{'en': '<NAME>er South'}, '61860014':{'en': 'Mount Walker South'}, '61860015':{'en': 'Kalgoorlie'}, '61860016':{'en': 'Esperance'},
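The prefix table above (and the similar blocks before it) maps dialling-code prefixes to English area descriptions. Nothing in this dump shows how the table is consumed, so the following is only a hedged sketch: the dict name and the longest-prefix-match strategy are assumptions, with a few entries copied from the data above so it runs standalone.

```python
# Hedged sketch: AREA_DESCRIPTIONS is an invented name for the mapping above,
# seeded with a few entries copied from it so the snippet is self-contained.
AREA_DESCRIPTIONS = {
    '617750346': {'en': 'Goondiwindi'},
    '61851021': {'en': 'Port Hedland'},
    '6185124': {'en': 'Karratha'},
}

def describe_number(number, lang='en'):
    """Return the description for the longest matching prefix, or None."""
    digits = number.lstrip('+')
    for end in range(len(digits), 0, -1):   # try the longest prefix first
        entry = AREA_DESCRIPTIONS.get(digits[:end])
        if entry and lang in entry:
            return entry[lang]
    return None

print(describe_number('+61775034612'))   # -> 'Goondiwindi'
print(describe_number('+61851247000'))   # -> 'Karratha' (matches the shorter prefix '6185124')
```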
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['SnapshotArgs', 'Snapshot'] @pulumi.input_type class SnapshotArgs: def __init__(__self__, *, instance_id: pulumi.Input[str], cron_timing: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, safe: Optional[pulumi.Input[bool]] = None): """ The set of arguments for constructing a Snapshot resource. :param pulumi.Input[str] instance_id: The ID of the Instance from which the snapshot will be taken. :param pulumi.Input[str] cron_timing: If a valid cron string is passed, the snapshot will be saved as an automated snapshot continuing to automatically update based on the schedule of the cron sequence provided The default is nil meaning the snapshot will be saved as a one-off snapshot. :param pulumi.Input[str] name: A name for the instance snapshot. :param pulumi.Input[bool] safe: If `true` the instance will be shut down during the snapshot to ensure all files are in a consistent state (e.g. database tables aren't in the middle of being optimised and hence risking corruption). The default is `false` so you experience no interruption of service, but a small risk of corruption. """ pulumi.set(__self__, "instance_id", instance_id) if cron_timing is not None: pulumi.set(__self__, "cron_timing", cron_timing) if name is not None: pulumi.set(__self__, "name", name) if safe is not None: pulumi.set(__self__, "safe", safe) @property @pulumi.getter(name="instanceId") def instance_id(self) -> pulumi.Input[str]: """ The ID of the Instance from which the snapshot will be taken. """ return pulumi.get(self, "instance_id") @instance_id.setter def instance_id(self, value: pulumi.Input[str]): pulumi.set(self, "instance_id", value) @property @pulumi.getter(name="cronTiming") def cron_timing(self) -> Optional[pulumi.Input[str]]: """ If a valid cron string is passed, the snapshot will be saved as an automated snapshot continuing to automatically update based on the schedule of the cron sequence provided The default is nil meaning the snapshot will be saved as a one-off snapshot. """ return pulumi.get(self, "cron_timing") @cron_timing.setter def cron_timing(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cron_timing", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ A name for the instance snapshot. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def safe(self) -> Optional[pulumi.Input[bool]]: """ If `true` the instance will be shut down during the snapshot to ensure all files are in a consistent state (e.g. database tables aren't in the middle of being optimised and hence risking corruption). The default is `false` so you experience no interruption of service, but a small risk of corruption. 
""" return pulumi.get(self, "safe") @safe.setter def safe(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "safe", value) @pulumi.input_type class _SnapshotState: def __init__(__self__, *, completed_at: Optional[pulumi.Input[str]] = None, cron_timing: Optional[pulumi.Input[str]] = None, hostname: Optional[pulumi.Input[str]] = None, instance_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, next_execution: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, requested_at: Optional[pulumi.Input[str]] = None, safe: Optional[pulumi.Input[bool]] = None, size_gb: Optional[pulumi.Input[int]] = None, state: Optional[pulumi.Input[str]] = None, template_id: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Snapshot resources. :param pulumi.Input[str] completed_at: The date where the snapshot was completed. :param pulumi.Input[str] cron_timing: If a valid cron string is passed, the snapshot will be saved as an automated snapshot continuing to automatically update based on the schedule of the cron sequence provided The default is nil meaning the snapshot will be saved as a one-off snapshot. :param pulumi.Input[str] hostname: The hostname of the instance. :param pulumi.Input[str] instance_id: The ID of the Instance from which the snapshot will be taken. :param pulumi.Input[str] name: A name for the instance snapshot. :param pulumi.Input[str] next_execution: if cron was define this date will be the next execution date. :param pulumi.Input[str] region: The region where the snapshot was take. :param pulumi.Input[str] requested_at: The date where the snapshot was requested. :param pulumi.Input[bool] safe: If `true` the instance will be shut down during the snapshot to ensure all files are in a consistent state (e.g. database tables aren't in the middle of being optimised and hence risking corruption). The default is `false` so you experience no interruption of service, but a small risk of corruption. :param pulumi.Input[int] size_gb: The size of the snapshot in GB. :param pulumi.Input[str] state: The status of the snapshot. :param pulumi.Input[str] template_id: The template id. """ if completed_at is not None: pulumi.set(__self__, "completed_at", completed_at) if cron_timing is not None: pulumi.set(__self__, "cron_timing", cron_timing) if hostname is not None: pulumi.set(__self__, "hostname", hostname) if instance_id is not None: pulumi.set(__self__, "instance_id", instance_id) if name is not None: pulumi.set(__self__, "name", name) if next_execution is not None: pulumi.set(__self__, "next_execution", next_execution) if region is not None: pulumi.set(__self__, "region", region) if requested_at is not None: pulumi.set(__self__, "requested_at", requested_at) if safe is not None: pulumi.set(__self__, "safe", safe) if size_gb is not None: pulumi.set(__self__, "size_gb", size_gb) if state is not None: pulumi.set(__self__, "state", state) if template_id is not None: pulumi.set(__self__, "template_id", template_id) @property @pulumi.getter(name="completedAt") def completed_at(self) -> Optional[pulumi.Input[str]]: """ The date where the snapshot was completed. 
""" return pulumi.get(self, "completed_at") @completed_at.setter def completed_at(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "completed_at", value) @property @pulumi.getter(name="cronTiming") def cron_timing(self) -> Optional[pulumi.Input[str]]: """ If a valid cron string is passed, the snapshot will be saved as an automated snapshot continuing to automatically update based on the schedule of the cron sequence provided The default is nil meaning the snapshot will be saved as a one-off snapshot. """ return pulumi.get(self, "cron_timing") @cron_timing.setter def cron_timing(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cron_timing", value) @property @pulumi.getter def hostname(self) -> Optional[pulumi.Input[str]]: """ The hostname of the instance. """ return pulumi.get(self, "hostname") @hostname.setter def hostname(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "hostname", value) @property @pulumi.getter(name="instanceId") def instance_id(self) -> Optional[pulumi.Input[str]]: """ The ID of the Instance from which the snapshot will be taken. """ return pulumi.get(self, "instance_id") @instance_id.setter def instance_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "instance_id", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ A name for the instance snapshot. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="nextExecution") def next_execution(self) -> Optional[pulumi.Input[str]]: """ if cron was define this date will be the next execution date. """ return pulumi.get(self, "next_execution") @next_execution.setter def next_execution(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "next_execution", value) @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: """ The region where the snapshot was take. """ return pulumi.get(self, "region") @region.setter def region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "region", value) @property @pulumi.getter(name="requestedAt") def requested_at(self) -> Optional[pulumi.Input[str]]: """ The date where the snapshot was requested. """ return pulumi.get(self, "requested_at") @requested_at.setter def requested_at(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "requested_at", value) @property @pulumi.getter def safe(self) -> Optional[pulumi.Input[bool]]: """ If `true` the instance will be shut down during the snapshot to ensure all files are in a consistent state (e.g. database tables aren't in the middle of being optimised and hence risking corruption). The default is `false` so you experience no interruption of service, but a small risk of corruption. """ return pulumi.get(self, "safe") @safe.setter def safe(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "safe", value) @property @pulumi.getter(name="sizeGb") def size_gb(self) -> Optional[pulumi.Input[int]]: """ The size of the snapshot in GB. """ return pulumi.get(self, "size_gb") @size_gb.setter def size_gb(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "size_gb", value) @property @pulumi.getter def state(self) -> Optional[pulumi.Input[str]]: """ The status of the snapshot. 
""" return pulumi.get(self, "state") @state.setter def state(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "state", value) @property @pulumi.getter(name="templateId") def template_id(self) -> Optional[pulumi.Input[str]]: """ The template id. """ return pulumi.get(self, "template_id") @template_id.setter def template_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "template_id", value) class Snapshot(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, cron_timing: Optional[pulumi.Input[str]] = None, instance_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, safe: Optional[pulumi.Input[bool]] = None, __props__=None): """ Provides a resource which can be used to create a snapshot from an existing Civo Instance. ## Example Usage ```python import pulumi import pulumi_civo as civo myinstance_backup = civo.Snapshot("myinstance-backup", instance_id=civo_instance["myinstance"]["id"]) ``` ## Import Instance Snapshots can be imported using the `snapshot id`, e.g. ```sh $ pulumi import civo:index/snapshot:Snapshot myinstance-backup 4cc87851-e1d0-4270-822a-b36d28c7a77f ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] cron_timing: If a valid cron
- CROP3] else: Z_dir = linearized[: len(linearized) - CROP3] return Z_dir, Z_direction def location_slices(self, txtnslices): """Define the location of the slice you are interested""" global location_slices location_slices = round(float(txtnslices.get()), 2) # Export the locations of slice from the GUI return location_slices def pixel_converter(self, location_slices): """Convert from the real z to pixel""" global location_slices_pixel_x global location_slices_pixel_y global location_slices_pixel_z location_slices_pixel_x = int(round(float(location_slices / x_actual), 2) * (x_size-1)) location_slices_pixel_y = int(round(float(location_slices / y_actual), 2) * (y_size-1)) location_slices_pixel_z = int(float(location_slices / round(float(np.array(Z_dir).max()),2)) * len(Z_dir)) + 1 return location_slices_pixel_x, location_slices_pixel_y, location_slices_pixel_z def clear(self): """Clear up the inputs""" txtnslices.delete(0, END) txtfilename.delete(0, END) canvas1.get_tk_widget().destroy() canvas2.get_tk_widget().destroy() def create_pslist(self, Z_direction): """The function that pulls out the approach or retract dataset.""" global pslist if Z_direction == "Retract": pslist = reduced_array_retract[: len(reduced_array_retract) - CROP3] else: pslist = reduced_array_approach[: len(reduced_array_approach) - CROP3] return pslist def twoDX_slicings(self, location_slices_pixel_x, export_filename2, bingo, x_actual, y_actual): """Plotting function for the X direction slicing""" global canvas1 global canvas2 root = tk.Toplevel(self) root.wm_state('zoomed') if location_slices_pixel_x in range(x_size + 1): pass else: tkMessageBox.askretrycancel("Input Error", "Out of range, The expected range for X is between 0 to " + str(x_actual) + ".") As = np.array(self.create_pslist(Z_direction))[:, location_slices_pixel_x, :] # Select the phaseshift data points in the certain slice plane As[np.isnan(As)] = np.nanmin(As) # Replace NaN with min value of array. 
a = np.linspace(init, x_actual, x_size)[location_slices_pixel_x] # Define the certain x slice in the x space b = np.linspace(init, y_actual, x_size) # Define the y space c = Z_dir # Define the z space X, Z, Y = np.meshgrid(a, c, b) # Create the meshgrid for the 3d space fig = plt.figure(figsize=(9, 9), facecolor='white') ax = fig.add_subplot(111, projection='3d') # Define the fixed colorbar range based the overall phaseshift values from the input data file im = ax.scatter(X, Y, Z, c=As.flatten(), s=6, vmax=np.nanmax(np.array(self.create_pslist(Z_direction))), vmin=np.nanmin(np.array(self.create_pslist(Z_direction)))) #Add colorbar cbar = plt.colorbar(im) # Label the colorbar cbar.set_label(str(valu), rotation=90) ax.set_xlim(left=init, right=x_actual) ax.set_ylim(bottom=init, top=y_actual) ax.set_zlim(top=Z_dir.max(), bottom=Z_dir.min()) ax.set_xlabel('X(nm)', fontsize=12) ax.set_ylabel('Y(nm)', fontsize=12) ax.set_zlabel('Z(nm)', fontsize=12) ax.set_title('3D X Slicing (X=' + str(round(a, 4)) + 'nm) for the ' + str(valu) + ' of AFM data', fontsize=13) canvas1 = FigureCanvasTkAgg(fig, master=root) # Plot the 3D figure of 2D slicing canvas1.draw() canvas1.get_tk_widget().pack(side=tk.LEFT) setStr = '{}_'.format(export_filename2) + str(valu) + '_Xslices.png' fig.savefig(setStr) fig1 = plt.figure(figsize=(9, 9), facecolor='white') plt.subplot(111) plt.imshow(As, aspect='auto', origin="lower", vmax=np.nanmax(np.array(self.create_pslist(Z_direction))), vmin=np.nanmin(np.array(self.create_pslist(Z_direction))), extent=[init, y_actual, init, Z_dir.max()]) plt.xlabel('Y', fontsize=12) plt.ylabel('Z', fontsize=12) plt.title('2D X Slicing (X=' + str(round(a, 4)) + 'nm) for the ' + str(valu) + ' of AFM data', fontsize=13) # Add and label colorbar cbar = plt.colorbar() cbar.set_label(str(valu)) canvas2 = FigureCanvasTkAgg(fig1, master=root) # Plot the 2D figure of 2D slicing canvas2.draw() canvas2.get_tk_widget().pack(side=tk.LEFT) setStr = '{}_'.format(export_filename2) + str(valu) + '_2d_Xslices.png' # Define the export image name fig1.savefig(setStr) if bingo == 1: #Get the HDF5 file for the plotting h5file = export_filename2 + '_' + str(valu) + '_' + str(Z_direction) + str("_XSlicing.h5") # Define the final name of the h5 file # Assuming As is a list of lists h = h5py.File(h5file, 'w') # Create the empty h5 file h.create_dataset("data", data=As) # Insert the data into the empty file else: pass def twoDY_slicings(self, location_slices_pixel_y, export_filename2, bingo, x_actual, y_actual): """Plotting function for the Y direction slicing""" global canvas1 global canvas2 if location_slices_pixel_y in range(y_size + 1): pass else: tkMessageBox.askretrycancel("Input Error", "Out of range, The expected range for Y is between 0 to " + str(y_actual) + ".") a = np.linspace(init, x_actual, x_size) b = np.linspace(init, y_actual, y_size)[location_slices_pixel_y] c = Z_dir X, Z, Y = np.meshgrid(a, c, b) Bs = np.array(self.create_pslist(Z_direction))[:, location_slices_pixel_y, :] Bs[np.isnan(Bs)] = np.nanmin(Bs) # Replace NaN with min value of array. 
root1 = tk.Toplevel(self) root1.state('zoomed') fig = plt.figure(figsize=(9, 9), facecolor='white') ax = fig.add_subplot(111, projection='3d') im = ax.scatter(X, Y, Z, c=Bs.flatten(), s=6, vmax=np.nanmax(np.array(self.create_pslist(Z_direction))), vmin=np.nanmin(np.array(self.create_pslist(Z_direction)))) # Add and lable colorbar cbar = plt.colorbar(im) cbar.set_label(str(valu)) ax.set_xlim(left=init, right=x_actual) ax.set_ylim(bottom=init, top=y_actual) ax.set_zlim(top=Z_dir.max(), bottom=Z_dir.min()) ax.set_xlabel('X(nm)', fontsize=12) ax.set_ylabel('Y(nm)', fontsize=12) ax.set_zlabel('Z(nm)', fontsize=12) ax.set_title('3D Y Slicing (Y=' + str(round(b, 3)) + 'nm) for the ' + str(valu) + ' of AFM data', fontsize=13) canvas1 = FigureCanvasTkAgg(fig, master=root1) canvas1.draw() canvas1.get_tk_widget().pack(side=tk.LEFT) setStr = '{}_'.format(export_filename2) + str(valu) + '_Yslices.png' fig.savefig(setStr) fig2 = plt.figure(figsize=(9, 9), facecolor='white') plt.subplot(111) plt.imshow(Bs, aspect='auto', origin="lower", vmax=np.nanmax(np.array(self.create_pslist(Z_direction))), vmin=np.nanmin(np.array(self.create_pslist(Z_direction))), extent=[init, x_actual, init, Z_dir.max()]) plt.xlabel('X', fontsize=12) plt.ylabel('Z', fontsize=12) plt.title('2D Y Slicing (Y=' + str(round(b, 3)) + 'nm) for the ' + str(valu) + ' of AFM data', fontsize=13) # Add and label colorbar cbar = plt.colorbar() cbar.set_label(str(valu)) canvas2 = FigureCanvasTkAgg(fig2, root1) canvas2.draw() canvas2.get_tk_widget().pack(side=tk.LEFT) setStr = '{}_'.format(export_filename2) + str(valu) + '_2d_Yslices.png' fig2.savefig(setStr) if bingo == 1: #Get the HDF5 file for the plotting h5file = export_filename2 + '_' + str(valu) + '_' + str(Z_direction) + str("_YSlicing.h5") # Assuming Bs is a list of lists h = h5py.File(h5file, 'w') h.create_dataset("data", data=Bs) else: pass def twoDZ_slicings(self, location_slices_pixel_z, export_filename2, bingo, x_actual, y_actual): """3D Plotting function for Z direction slicing""" global canvas1 global canvas2 global canvas3 global numberinput if location_slices_pixel_z in range (len(Z_dir) + 2): pass else: tkMessageBox.askretrycancel("Input Error", "Out of range, The expected range for Z is between 0 to " + str(np.array(Z_dir).max()) + ".") phaseshift = (self.create_pslist(Z_direction))[location_slices_pixel_z] a = np.linspace(init, x_actual, x_size) b = np.linspace(init, y_actual, y_size) X, Z, Y = np.meshgrid(a, Z_dir[location_slices_pixel_z], b) l = phaseshift l[np.isnan(l)] = np.nanmin(l) # Replace NaN with min value of array. 
fig3, ax1 = plt.subplots(figsize=(9, 9), facecolor='white') im2 = ax1.imshow(l, vmax=np.nanmax(np.array(self.create_pslist(Z_direction))),vmin=np.nanmin(np.array(self.create_pslist(Z_direction))), extent=[init, x_actual, init, y_actual]) mpldatacursor.datacursor(hover=True, bbox=dict(alpha=1, fc='w'), formatter='i, j = {i}, {j}\nz = {z:.02g}'.format) plt.xlabel('X', fontsize=12) plt.ylabel('Y', fontsize=12) plt.title('2D Z Slicing (Z=' + str(round(Z_dir[(location_slices_pixel_z) - 1], 4)) + 'nm) for the ' + str(valu) + ' of AFM data', fontsize=13) # Add and label colorbar cbar = plt.colorbar(im2) cbar.set_label(str(valu)) fig = plt.figure(figsize=(9, 9), facecolor='white') ax = fig.add_subplot(111, projection='3d') im1 = ax.scatter(X, Y, Z, c=l.flatten(), s=6, vmax=np.nanmax(np.array(self.create_pslist(Z_direction))), vmin=np.nanmin(np.array(self.create_pslist(Z_direction)))) # Add and label colorbar cbar = plt.colorbar(im1) cbar.set_label(str(valu)) ax.set_xlim(left=init, right=x_actual) ax.set_ylim(bottom=init, top=y_actual) ax.set_zlim(top=Z_dir.max(), bottom=Z_dir.min()) ax.set_xlabel('X(nm)', fontsize=12) ax.set_ylabel('Y(nm)', fontsize=12) ax.set_zlabel('Z(nm)', fontsize=12) ax.set_title('3D Z Slicing (Z=' + str(round(Z_dir[(location_slices_pixel_z) - 1], 4)) + 'nm) for the ' + str(valu) + ' of AFM data', fontsize=13) root2 = tk.Toplevel(self) root2.state('zoomed') canvas1 = FigureCanvasTkAgg(fig, master=root2) canvas1.draw() canvas1.get_tk_widget().pack(side=tk.LEFT) h5file = export_filename2 + '_' + str(valu) + '_' + str(Z_direction) + str("_ZSlicing.h5") # Assuming phaseshift is a list of lists h = h5py.File(h5file, 'w') h.create_dataset("data", data=phaseshift) setStr = '{}_'.format(export_filename2) + str(valu) + '_Zslices.png' fig.savefig(setStr) canvas2 = FigureCanvasTkAgg(fig3, master=root2) canvas2.draw() canvas2.get_tk_widget().pack(side=tk.LEFT) setStr = '{}_'.format(export_filename2) + str(valu) + '_2d_Zslices.png' fig3.savefig(setStr) if bingo == 1: #Get the HDF5 file for the plotting h5file = export_filename2 + str(Z_direction) + str("_Z.h5") # Assuming phaseshift is a list of lists h = h5py.File(h5file, 'w') h.create_dataset("data", data=phaseshift) else: pass def __init__(self, parent, controller): global txtnslices global txtzdir global txtfilename tk.Frame.__init__(self, parent) tk.Frame.configure(self, background='#ffffff') label1 = ttk.Label(self, text="Step 5: 2D Slicing Plot", font='Huge_Font', background='#ffffff') label1.pack(pady=10, padx=10) label2 = ttk.Label(self, text="Slices Location (nm)", font="Small_Font", background='#ffffff') label2.pack(pady=10, padx=10) txtnslices = ttk.Entry(self) txtnslices.pack() label3 = ttk.Label(self, text="Select the Z Direction", font='Large_Font', background='#ffffff') label3.pack(pady=10, padx=10) lab = LabelFrame(self) lab.pack() listbox = Listbox(lab, exportselection=0) listbox.configure(height=2) listbox.pack() listbox.insert(1, "Retract") listbox.insert(2, "Approach") listbox.bind('<<ListboxSelect>>', self.Curselect6) label4 = ttk.Label(self, text="Export Filename", font="Small_Font", background='#ffffff') label4.pack(pady=10, padx=10) txtfilename = ttk.Entry(self) txtfilename.pack() var00 = IntVar() checkbutton = Checkbutton(self, text="HDF5", variable=var00, bg='white') checkbutton.place(x=900, y=275) button1 = tk.Button(self, text="Get 2D X Slicing Plot", bg="white", command=lambda: (self.get_bingo(var00),self.location_slices(txtnslices), self.export_filename(txtfilename), self.pixel_converter(location_slices), 
self.twoDX_slicings(location_slices_pixel_x, export_filename2, bingo, x_actual,y_actual))) button1.place(x=645, y=275) button2 = tk.Button(self, text="Get 2D Y Slicing Plot", bg="white", command=lambda: (self.get_bingo(var00), self.location_slices(txtnslices), self.export_filename(txtfilename), self.pixel_converter(location_slices), self.twoDY_slicings(location_slices_pixel_y, export_filename2, bingo, x_actual, y_actual))) button2.place(x=775, y=275) button3 = tk.Button(self, text="Get 2D Z Slicing Plot", bg="white", command=lambda: (self. get_bingo(var00), self.location_slices(txtnslices), self.export_filename(txtfilename), self.pixel_converter(location_slices), self.twoDZ_slicings(location_slices_pixel_z, export_filename2, bingo, x_actual, y_actual))) button3.place(x=645, y=310) button4 = tk.Button(self, text="Get Vector Slicing Plot", bg="white", command=lambda: (self.get_bingo(var00), self.location_slices(txtnslices), self.export_filename(txtfilename), self.pixel_converter(location_slices), self.plot_force(location_slices_pixel_z, x_actual, y_actual))) button4.place(x=775, y=310) button6 = tk.Button(self, text="3D Plot", bg="white", command=lambda: controller.show_frame(threeD_plot)) button6.place(x=645, y=345) button6.config(width=15) button7 = tk.Button(self, text="2D Slicing Animation", bg="white", command=lambda: controller.show_frame(animation_cool)) button7.place(x=775, y=345) button8 = tk.Button(self, text="Organizing Dataset", bg="white", command=lambda: controller.show_frame(load_data)) button8.place(x=720, y=380) button8.config(width=15) button9 = tk.Button(self, text="Home", bg="white", command=lambda: controller.show_frame(data_cleaning)) button9.place(x=720, y=415) button9.config(width=15) label5 = tk.Label(self, text="The reference level for the plots is set as zero at the
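The GUI class above is cut off mid-label, but its pixel_converter() logic, which maps a physical slice location in nanometres onto a pixel index via the scan size, is complete. A standalone restatement of that mapping, with made-up scan parameters, purely for illustration:

```python
# Standalone sketch of the real-space -> pixel-index mapping used by
# pixel_converter() above; x_actual and x_size below are invented sample values.
def to_pixel(location_nm, actual_nm, n_pixels):
    """Map a physical location (nm) onto a 0-based pixel index along one axis."""
    return int(round(location_nm / actual_nm, 2) * (n_pixels - 1))

x_actual, x_size = 10.0, 256            # hypothetical scan width (nm) and resolution
print(to_pixel(2.5, x_actual, x_size))  # -> 63, i.e. a quarter of the way across
```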
# Copyright (C) 2020 Intel Corporation # # SPDX-License-Identifier: MIT import io import os import os.path as osp import tempfile import xml.etree.ElementTree as ET import zipfile from collections import defaultdict from glob import glob from io import BytesIO import copy from shutil import copyfile import itertools from django.contrib.auth.models import Group, User from rest_framework import status from rest_framework.test import APIClient, APITestCase from cvat.apps.engine.media_extractors import ValidateDimension from cvat.apps.dataset_manager.task import TaskAnnotation from datumaro.util.test_utils import TestDir CREATE_ACTION = "create" UPDATE_ACTION = "update" DELETE_ACTION = "delete" class ForceLogin: def __init__(self, user, client): self.user = user self.client = client def __enter__(self): if self.user: self.client.force_login(self.user, backend='django.contrib.auth.backends.ModelBackend') return self def __exit__(self, exception_type, exception_value, traceback): if self.user: self.client.logout() class _DbTestBase(APITestCase): def setUp(self): self.client = APIClient() @classmethod def setUpTestData(cls): cls.create_db_users() @classmethod def create_db_users(cls): (group_admin, _) = Group.objects.get_or_create(name="admin") (group_user, _) = Group.objects.get_or_create(name="user") user_admin = User.objects.create_superuser(username="admin", email="", password="<PASSWORD>") user_admin.groups.add(group_admin) user_dummy = User.objects.create_user(username="user", password="<PASSWORD>") user_dummy.groups.add(group_user) cls.admin = user_admin cls.user = user_dummy def _put_api_v1_task_id_annotations(self, tid, data): with ForceLogin(self.admin, self.client): response = self.client.put("/api/v1/tasks/%s/annotations" % tid, data=data, format="json") return response def _put_api_v1_job_id_annotations(self, jid, data): with ForceLogin(self.admin, self.client): response = self.client.put("/api/v1/jobs/%s/annotations" % jid, data=data, format="json") return response def _patch_api_v1_task_id_annotations(self, tid, data, action, user): with ForceLogin(user, self.client): response = self.client.patch( "/api/v1/tasks/{}/annotations?action={}".format(tid, action), data=data, format="json") return response def _patch_api_v1_job_id_annotations(self, jid, data, action, user): with ForceLogin(user, self.client): response = self.client.patch( "/api/v1/jobs/{}/annotations?action={}".format(jid, action), data=data, format="json") return response def _create_task(self, data, image_data): with ForceLogin(self.user, self.client): response = self.client.post('/api/v1/tasks', data=data, format="json") assert response.status_code == status.HTTP_201_CREATED, response.status_code tid = response.data["id"] response = self.client.post("/api/v1/tasks/%s/data" % tid, data=image_data) assert response.status_code == status.HTTP_202_ACCEPTED, response.status_code response = self.client.get("/api/v1/tasks/%s" % tid) task = response.data return task @staticmethod def _get_tmp_annotation(task, annotation): tmp_annotations = copy.deepcopy(annotation) for item in tmp_annotations: if item in ["tags", "shapes", "tracks"]: for index_elem, _ in enumerate(tmp_annotations[item]): tmp_annotations[item][index_elem]["label_id"] = task["labels"][0]["id"] for index_attribute, attribute in enumerate(task["labels"][0]["attributes"]): spec_id = task["labels"][0]["attributes"][index_attribute]["id"] value = attribute["default_value"] if item == "tracks" and attribute["mutable"]: for index_shape, _ in 
enumerate(tmp_annotations[item][index_elem]["shapes"]): tmp_annotations[item][index_elem]["shapes"][index_shape]["attributes"].append({ "spec_id": spec_id, "value": value, }) else: tmp_annotations[item][index_elem]["attributes"].append({ "spec_id": spec_id, "value": value, }) return tmp_annotations def _get_jobs(self, task_id): with ForceLogin(self.admin, self.client): response = self.client.get("/api/v1/tasks/{}/jobs".format(task_id)) return response.data def _get_request(self, path, user): with ForceLogin(user, self.client): response = self.client.get(path) return response def _get_request_with_data(self, path, data, user): with ForceLogin(user, self.client): response = self.client.get(path, data) return response def _delete_request(self, path, user): with ForceLogin(user, self.client): response = self.client.delete(path) return response def _download_file(self, url, data, user, file_name): response = self._get_request_with_data(url, data, user) self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) response = self._get_request_with_data(url, data, user) self.assertEqual(response.status_code, status.HTTP_200_OK) content = BytesIO(b"".join(response.streaming_content)) with open(file_name, "wb") as f: f.write(content.getvalue()) def _upload_file(self, url, data, user): response = self._put_request_with_data(url, {"annotation_file": data}, user) self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) response = self._put_request_with_data(url, {}, user) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def _generate_url_dump_tasks_annotations(self, task_id): return f"/api/v1/tasks/{task_id}/annotations" def _generate_url_upload_tasks_annotations(self, task_id, upload_format_name): return f"/api/v1/tasks/{task_id}/annotations?format={upload_format_name}" def _generate_url_dump_job_annotations(self, job_id): return f"/api/v1/jobs/{job_id}/annotations" def _generate_url_upload_job_annotations(self, job_id, upload_format_name): return f"/api/v1/jobs/{job_id}/annotations?format={upload_format_name}" def _generate_url_dump_dataset(self, task_id): return f"/api/v1/tasks/{task_id}/dataset" def _remove_annotations(self, tid): response = self._delete_request(f"/api/v1/tasks/{tid}/annotations", self.admin) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) return response def _put_request_with_data(self, url, data, user): with ForceLogin(user, self.client): response = self.client.put(url, data) return response def _delete_task(self, tid): response = self._delete_request('/api/v1/tasks/{}'.format(tid), self.admin) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) return response def _check_dump_content(self, content, task_data, format_name, related_files=True): def etree_to_dict(t): d = {t.tag: {} if t.attrib else None} children = list(t) if children: dd = defaultdict(list) for dc in map(etree_to_dict, children): for k, v in dc.items(): dd[k].append(v) d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}} if t.attrib: d[t.tag].update(('@' + k, v) for k, v in t.attrib.items()) if t.text: text = t.text.strip() if children or t.attrib: if text: d[t.tag]['#text'] = text else: d[t.tag] = text return d if format_name == "Kitti Raw Format 1.0": with tempfile.TemporaryDirectory() as tmp_dir: zipfile.ZipFile(content).extractall(tmp_dir) xmls = glob(osp.join(tmp_dir, '**', '*.xml'), recursive=True) self.assertTrue(xmls) for xml in xmls: xmlroot = ET.parse(xml).getroot() self.assertEqual(xmlroot.tag, "boost_serialization") items = 
xmlroot.findall("./tracklets/item") self.assertEqual(len(items), len(task_data["shapes"])) elif format_name == "Sly Point Cloud Format 1.0": with tempfile.TemporaryDirectory() as tmp_dir: checking_files = [osp.join(tmp_dir, "key_id_map.json"), osp.join(tmp_dir, "meta.json"), osp.join(tmp_dir, "ds0", "ann", "000001.pcd.json"), osp.join(tmp_dir, "ds0", "ann", "000002.pcd.json"), osp.join(tmp_dir, "ds0", "ann","000003.pcd.json")] if related_files: checking_files.extend([osp.join(tmp_dir, "ds0", "related_images", "000001.pcd_pcd", "000001.png.json"), osp.join(tmp_dir, "ds0", "related_images", "000002.pcd_pcd", "000002.png.json"), osp.join(tmp_dir, "ds0", "related_images", "000003.pcd_pcd", "000003.png.json")]) zipfile.ZipFile(content).extractall(tmp_dir) jsons = glob(osp.join(tmp_dir, '**', '*.json'), recursive=True) self.assertTrue(jsons) self.assertTrue(set(checking_files).issubset(set(jsons))) class Task3DTest(_DbTestBase): @classmethod def setUpClass(cls): super().setUpClass() cls.format_names = ["Sly Point Cloud Format 1.0", "Kitti Raw Format 1.0"] cls._image_sizes = {} cls.pointcloud_pcd_filename = "test_canvas3d.zip" cls.pointcloud_pcd_path = osp.join(os.path.dirname(__file__), 'assets', cls.pointcloud_pcd_filename) image_sizes = [] zip_file = zipfile.ZipFile(cls.pointcloud_pcd_path ) for info in zip_file.namelist(): if info.rsplit(".", maxsplit=1)[-1] == "pcd": with zip_file.open(info, "r") as file: data = ValidateDimension.get_pcd_properties(file) image_sizes.append((int(data["WIDTH"]), int(data["HEIGHT"]))) cls.task = { "name": "main task", "owner_id": 1, "assignee_id": 2, "overlap": 0, "segment_size": 100, "labels": [ {"name": "car"}, {"name": "person"}, ] } cls.task_with_attributes = { "name": "task with attributes", "owner_id": 1, "assignee_id": 2, "overlap": 0, "segment_size": 100, "labels": [ {"name": "car", "color": "#2080c0", "attributes": [ { "name": "radio_name", "mutable": False, "input_type": "radio", "default_value": "x1", "values": ["x1", "x2", "x3"] }, { "name": "check_name", "mutable": True, "input_type": "checkbox", "default_value": "false", "values": ["false"] }, { "name": "text_name", "mutable": False, "input_type": "text", "default_value": "qwerty", "values": ["qwerty"] }, { "name": "number_name", "mutable": False, "input_type": "number", "default_value": "-4.0", "values": ["-4", "4", "1"] } ] }, {"name": "person", "color": "#c06060", "attributes": [] }, ] } cls.task_many_jobs = { "name": "task several jobs", "owner_id": 1, "assignee_id": 2, "overlap": 3, "segment_size": 1, "labels": [ { "name": "car", "color": "#c06060", "id": 1, "attributes": [] } ] } cls.cuboid_example = { "version": 0, "tags": [], "shapes": [ { "type": "cuboid", "occluded": False, "z_order": 0, "points": [0.16, 0.20, -0.26, 0, -0.14, 0, 4.84, 4.48, 4.12, 0, 0, 0, 0, 0, 0, 0], "frame": 0, "label_id": None, "group": 0, "source": "manual", "attributes": [] }, ], "tracks": [] } cls._image_sizes[cls.pointcloud_pcd_filename] = image_sizes cls.expected_action = { cls.admin: {'name': 'admin', 'code': status.HTTP_200_OK, 'annotation_changed': True}, cls.user: {'name': 'user', 'code': status.HTTP_200_OK, 'annotation_changed': True}, None: {'name': 'none', 'code': status.HTTP_401_UNAUTHORIZED, 'annotation_changed': False}, } cls.expected_dump_upload = { cls.admin: {'name': 'admin', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED, 'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True, 'annotation_loaded': True}, cls.user: {'name': 'user', 'code': status.HTTP_200_OK, 'create code': 
status.HTTP_201_CREATED, 'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True, 'annotation_loaded': True}, None: {'name': 'none', 'code': status.HTTP_401_UNAUTHORIZED, 'create code': status.HTTP_401_UNAUTHORIZED, 'accept code': status.HTTP_401_UNAUTHORIZED, 'file_exists': False, 'annotation_loaded': False}, } def copy_pcd_file_and_get_task_data(self, test_dir): tmp_file = osp.join(test_dir, self.pointcloud_pcd_filename) copyfile(self.pointcloud_pcd_path, tmp_file) task_data = { "client_files[0]": open(tmp_file, 'rb'), "image_quality": 100, } return task_data def test_api_v1_create_annotation_in_job(self): with TestDir() as test_dir: task_data = self.copy_pcd_file_and_get_task_data(test_dir) task = self._create_task(self.task, task_data) task_id = task["id"] annotation = self._get_tmp_annotation(task, self.cuboid_example) for user, edata in list(self.expected_action.items()): with self.subTest(format=edata["name"]): response = self._patch_api_v1_task_id_annotations(task_id, annotation, CREATE_ACTION, user) self.assertEqual(response.status_code, edata["code"]) if edata["annotation_changed"]: task_ann = TaskAnnotation(task_id) task_ann.init_from_db() task_shape = task_ann.data["shapes"][0] task_shape.pop("id") self.assertEqual(task_shape, annotation["shapes"][0]) self._remove_annotations(task_id) def test_api_v1_update_annotation_in_task(self): with TestDir() as test_dir: task_data = self.copy_pcd_file_and_get_task_data(test_dir) task = self._create_task(self.task, task_data) task_id = task["id"] annotation = self._get_tmp_annotation(task, self.cuboid_example) response = self._put_api_v1_task_id_annotations(task_id, annotation) self.assertEqual(response.status_code, status.HTTP_200_OK) for user, edata in list(self.expected_action.items()): with self.subTest(format=edata["name"]): task_ann_prev = TaskAnnotation(task_id) task_ann_prev.init_from_db() annotation["shapes"][0]["points"] = [x + 0.1 for x in annotation["shapes"][0]["points"]] annotation["shapes"][0]["id"] = task_ann_prev.data["shapes"][0]["id"] response = self._patch_api_v1_task_id_annotations(task_id, annotation, UPDATE_ACTION, user) self.assertEqual(response.status_code, edata["code"], task_id) if edata["annotation_changed"]: task_ann = TaskAnnotation(task_id) task_ann.init_from_db() self.assertEqual(task_ann.data["shapes"], annotation["shapes"]) def test_api_v1_remove_annotation_in_task(self): with TestDir() as test_dir: task_data = self.copy_pcd_file_and_get_task_data(test_dir) task = self._create_task(self.task, task_data) task_id = task["id"] annotation = self._get_tmp_annotation(task, self.cuboid_example) for user, edata in list(self.expected_action.items()): with self.subTest(format=edata["name"]): response = self._patch_api_v1_task_id_annotations(task_id, annotation, CREATE_ACTION, self.admin) self.assertEqual(response.status_code, status.HTTP_200_OK) task_ann_prev = TaskAnnotation(task_id) task_ann_prev.init_from_db() annotation["shapes"][0]["id"] = task_ann_prev.data["shapes"][0]["id"] response = self._patch_api_v1_task_id_annotations(task_id, annotation, DELETE_ACTION, user) self.assertEqual(response.status_code, edata["code"]) if edata["annotation_changed"]: task_ann = TaskAnnotation(task_id) task_ann.init_from_db() self.assertTrue(len(task_ann.data["shapes"]) == 0) def test_api_v1_create_annotation_in_jobs(self): with TestDir() as test_dir: task_data = self.copy_pcd_file_and_get_task_data(test_dir) task = self._create_task(self.task, task_data) task_id = task["id"] annotation = self._get_tmp_annotation(task, 
self.cuboid_example) jobs = self._get_jobs(task_id) for user, edata in list(self.expected_action.items()): with self.subTest(format=edata["name"]): response = self._patch_api_v1_job_id_annotations(jobs[0]["id"], annotation, CREATE_ACTION, user) self.assertEqual(response.status_code, edata["code"]) task_ann = TaskAnnotation(task_id) task_ann.init_from_db() if len(task_ann.data["shapes"]): task_shape = task_ann.data["shapes"][0] task_shape.pop("id") self.assertEqual(task_shape, annotation["shapes"][0]) self._remove_annotations(task_id) def test_api_v1_update_annotation_in_job(self): with TestDir() as test_dir: task_data = self.copy_pcd_file_and_get_task_data(test_dir) task = self._create_task(self.task, task_data) task_id = task["id"] jobs = self._get_jobs(task_id) annotation = self._get_tmp_annotation(task, self.cuboid_example) response = self._put_api_v1_task_id_annotations(task_id, annotation) self.assertEqual(response.status_code, status.HTTP_200_OK) for user, edata in list(self.expected_action.items()): with self.subTest(format=edata["name"]): task_ann_prev = TaskAnnotation(task_id) task_ann_prev.init_from_db() annotation["shapes"][0]["points"] = [x + 0.1 for x in annotation["shapes"][0]["points"]] annotation["shapes"][0]["id"] = task_ann_prev.data["shapes"][0]["id"] response = self._patch_api_v1_job_id_annotations(jobs[0]["id"], annotation, UPDATE_ACTION, user) self.assertEqual(response.status_code, edata["code"]) if edata["annotation_changed"]: task_ann = TaskAnnotation(task_id) task_ann.init_from_db() self.assertEqual(task_ann.data["shapes"], annotation["shapes"]) def test_api_v1_remove_annotation_in_job(self): with TestDir() as test_dir: task_data = self.copy_pcd_file_and_get_task_data(test_dir) task = self._create_task(self.task, task_data) task_id = task["id"] jobs = self._get_jobs(task_id) annotation = self._get_tmp_annotation(task, self.cuboid_example) for user, edata in list(self.expected_action.items()):
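The last test above stops at its per-user loop. Its body is not reconstructed here; instead, a compressed, hypothetical illustration of the pattern every test in this class follows: one subTest per entry in expected_action, asserting the status code that table predicts.

```python
# Hypothetical helper illustrating the per-user subTest pattern used above;
# "endpoint_call" stands in for whichever request helper a given test exercises.
def _assert_expected_status(self, endpoint_call):
    for user, edata in list(self.expected_action.items()):
        with self.subTest(format=edata["name"]):
            response = endpoint_call(user)
            self.assertEqual(response.status_code, edata["code"])
```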
#!/usr/bin/env python3 import netcdf4_functions as nffun import socket, os, sys, csv, time, math, numpy import re, subprocess from optparse import OptionParser #from Numeric import * #runcase.py does the following: # # 1. Call routines to create surface and domain data (makepointdata.py) # 2. Use create_newcase to build the new case with specified options # 3. Set point and case-epecific namelist options # 4. configure case # 5. build (compile) ACME with clean_build first if requested # 6. apply user-specified PBS and submit information # 7. submit single run or parameter ensemble job to PBS queue. # #-------------------Parse options----------------------------------------------- parser = OptionParser() parser.add_option("--caseidprefix", dest="mycaseid", default="", \ help="Unique identifier to include as a prefix to the case name") parser.add_option("--caseroot", dest="caseroot", default='', \ help = "case root directory (default = ./, i.e., under scripts/)") parser.add_option("--dailyrunoff", dest="dailyrunoff", default=False, \ action="store_true", help="Write daily output for hydrology") parser.add_option("--diags", dest="diags", default=False, \ action="store_true", help="Write special outputs for diagnostics") parser.add_option("--debugq", dest="debug", default=False, \ action="store_true", help='Use debug queue') parser.add_option("--runroot", dest="runroot", default="", \ help="Directory where the run would be created") parser.add_option('--project', dest='project',default='', \ help='Set project') parser.add_option("--exeroot", dest="exeroot", default="", \ help="Location of executable") parser.add_option("--lat_bounds", dest="lat_bounds", default='-999,-999', \ help = 'latitude range for regional run') parser.add_option("--lon_bounds", dest="lon_bounds", default='-999,-999', \ help = 'longitude range for regional run') parser.add_option("--humhol", dest="humhol", default=False, \ help = 'Use hummock/hollow microtopography', action="store_true") parser.add_option("--marsh", dest="marsh", default=False, \ help = 'Use marsh hydrology/elevation', action="store_true") parser.add_option("--mask", dest="mymask", default='', \ help = 'Mask file to use (regional only)') parser.add_option("--model", dest="mymodel", default='', \ help = 'Model to use (ELM,CLM5)') parser.add_option("--monthly_metdata", dest="monthly_metdata", default = '', \ help = "File containing met data (cpl_bypass only)") parser.add_option("--namelist_file", dest="namelist_file", default='', \ help="File containing custom namelist options for user_nl_clm") parser.add_option("--ilambvars", dest="ilambvars", default=False, \ action="store_true", help="Write special outputs for diagnostics") parser.add_option("--dailyvars", dest="dailyvars", default=False, \ action="store_true", help="Write daily ouptut variables") parser.add_option("--res", dest="res", default="CLM_USRDAT", \ help='Resoultion for global simulation') parser.add_option("--point_list", dest="point_list", default='', \ help = 'File containing list of points to run') parser.add_option("--pft", dest="mypft", default=-1, \ help = 'Use this PFT for all gridcells') parser.add_option("--site_forcing", dest="site_forcing", default='', \ help = '6-character FLUXNET code for forcing data') parser.add_option("--site", dest="site", default='', \ help = '6-character FLUXNET code to run (required)') parser.add_option("--sitegroup", dest="sitegroup", default="AmeriFlux", \ help = "site group to use (default AmeriFlux)") parser.add_option("--coldstart", dest="coldstart", 
default=False, \ help = "set cold start (mutually exclusive w/finidat)", \ action="store_true") parser.add_option("--compset", dest="compset", default='I1850CNPRDCTCBC', \ help = "component set to use (required)\n" "Currently supports ONLY *CLM45(CN) compsets") parser.add_option("--cruncep", dest="cruncep", default=False, \ help = "use cru-ncep data", action="store_true") parser.add_option("--cruncepv8", dest="cruncepv8", default=False, \ help = "use cru-ncep data", action="store_true") parser.add_option("--cplhist", dest="cplhist", default=False, \ help= "use CPLHIST forcing", action="store_true") parser.add_option("--gswp3", dest="gswp3", default=False, \ help= "use GSWP3 forcing", action="store_true") parser.add_option("--princeton", dest="princeton", default=False, \ help= "use Princecton forcing", action="store_true") parser.add_option("--livneh", dest="livneh", default=False, \ action="store_true", help = "Livneh correction to CRU precip (CONUS only)") parser.add_option("--daymet", dest="daymet", default=False, \ action="store_true", help = "Daymet correction to GSWP3 precip (CONUS only)") parser.add_option("--machine", dest="machine", default = '', \ help = "machine to\n") parser.add_option("--compiler", dest="compiler", default='', \ help = "compiler to use (pgi, gnu)") parser.add_option("--mpilib", dest="mpilib", default="mpi-serial", \ help = "mpi library (openmpi*, mpich, ibm, mpi-serial)") parser.add_option("--ad_spinup", action="store_true", \ dest="ad_spinup", default=False, \ help = 'Run accelerated decomposition spinup') parser.add_option("--exit_spinup", action="store_true", \ dest="exit_spinup", default=False, \ help = 'Run exit spinup (CLM 4.0 only)') parser.add_option("--model_root", dest="csmdir", default='', \ help = "base model directory") parser.add_option("--ccsm_input", dest="ccsm_input", default='', \ help = "input data directory for CESM (required)") parser.add_option("--finidat_case", dest="finidat_case", default='', \ help = "case containing initial data file to use" \ +" (should be in your run directory)") parser.add_option("--finidat", dest="finidat", default='', \ help = "initial data file to use" \ +" (should be in your run directory)") parser.add_option("--finidat_year", dest="finidat_year", default=-1, \ help = "model year of initial data file (default is" \ +" last available)") parser.add_option("--run_units", dest="run_units", default='nyears', \ help = "run length units (ndays, nyears)") parser.add_option("--run_n", dest="run_n", default=50, \ help = "run length (in run units)") parser.add_option("--rest_n", dest="rest_n", default=-1, \ help = "restart interval (in run units)") parser.add_option("--run_startyear", dest="run_startyear",default=-1, \ help='Starting year for model output') parser.add_option("--rmold", dest="rmold", default=False, action="store_true", \ help = 'Remove old case directory with same name' \ +" before proceeding") parser.add_option("--srcmods_loc", dest="srcmods_loc", default='', \ help = 'Copy sourcemods from this location') parser.add_option("--parm_file", dest="parm_file", default='', help = 'file for parameter modifications') parser.add_option("--parm_vals", dest="parm_vals", default="", \ help = 'User specified parameter values') parser.add_option("--parm_file_P", dest="parm_file_P", default='', help = 'file for P parameter modifications') parser.add_option("--hist_mfilt", dest="hist_mfilt", default=-1, \ help = 'number of output timesteps per file') parser.add_option("--hist_nhtfrq", dest="hist_nhtfrq", default=-999, \ 
help = 'output file timestep') parser.add_option("--hist_vars", dest="hist_vars", default='', \ help = 'use hist_vars file') #parser.add_option("--queue", dest="queue", default='essg08q', \ # help = 'PBS submission queue') parser.add_option("--clean_config", dest="clean_config", default=False, \ help = 'Run cesm_setup -clean script') parser.add_option("--clean_build", dest="clean_build", default=False, \ help = 'Perform clean build before building', \ action="store_true") parser.add_option("--no_config", dest="no_config", default=False, \ help = 'do NOT configure case', action="store_true") parser.add_option("--no_build", dest="no_build", default=False, \ help = 'do NOT build CESM', action="store_true") parser.add_option("--no_submit", dest="no_submit", default=False, \ help = 'do NOT submit CESM to queue', action="store_true") parser.add_option("--align_year", dest="align_year", default=-999, \ help = 'Alignment year (transient run only)') parser.add_option("--np", dest="np", default=1, \ help = 'number of processors') parser.add_option("--ninst", dest="ninst", default=1, \ help = 'number of land model instances') parser.add_option("--ng", dest="ng", default=64, \ help = 'number of groups to run in ensmble mode') parser.add_option("--tstep", dest="tstep", default=0.5, \ help = 'CLM timestep (hours)') parser.add_option("--co2_file", dest="co2_file", default="fco2_datm_rcp4.5_1765-2500_c130312.nc", \ help = 'CLM timestep (hours)') parser.add_option("--nyears_ad_spinup", dest="ny_ad", default=250, \ help = 'number of years to run ad_spinup') parser.add_option("--metdir", dest="metdir", default="none", \ help = 'subdirectory for met data forcing') parser.add_option("--nopointdata", action="store_true", \ dest="nopointdata", help="Do NOT make point data (use data already created)", \ default=False) #parser.add_option("--cleanlogs",dest="cleanlogs", help=\ # "Removes temporary and log files that are created",\ # default=False,action="store_true") parser.add_option("--nofire", action="store_true", dest="nofire", default=False, \ help="To turn off wildfires") parser.add_option("--nopftdyn", action="store_true", dest="nopftdyn", \ default = False, help='Do not use dynamic PFT file') parser.add_option("--harvmod", action="store_true", dest="harvmod", \ default=False, help = "Turn on harvest modificaton" \ "All harvest is performed in first timestep") parser.add_option("--no_dynroot", dest="no_dynroot", default=False, \ help = 'Turn off dynamic root distribution', action="store_true") parser.add_option("--bulk_denitrif", dest="bulk_denitrif", default=False, \ help = 'To turn off BGC nitrification-denitrification', action="store_true") parser.add_option("--vertsoilc", dest="vsoilc", default=False, \ help = 'To turn on CN with multiple soil layers, excluding CENTURY C module (CLM4ME on as well)', action="store_true") parser.add_option("--centbgc", dest="centbgc", default=False, \ help = 'To turn on CN with multiple soil layers, CENTURY C module (CLM4ME on as well)', action="store_true") parser.add_option("--CH4", dest="CH4", default=False, \ help = 'To turn on CN with CLM4me', action="store_true") parser.add_option("--1850_ndep", dest="ndep1850", default=False, \ help = 'Use constant 1850 N deposition', action="store_true") parser.add_option("--1850_aero", dest="aero1850", default=False, \ help = 'Use constant 1850 aerosol deposition', action="store_true") parser.add_option("--1850_co2", dest="co21850", default=False, \ help = 'Use constant 1850 CO2 concentration', action="store_true") 
parser.add_option("--C13", dest="C13", default=False, \ help = 'Switch to turn on C13', action="store_true") parser.add_option("--C14", dest="C14", default=False, \ help = 'Use C14 as C13 (no decay)', action="store_true") parser.add_option("--branch", dest="branch", default=False, \ help = 'Switch for branch run', action="store_true") parser.add_option("--makemetdata", dest="makemet", default=False, \ help = 'Generate meteorology', action="store_true") parser.add_option("--surfdata_grid", dest="surfdata_grid", default=False, \ help = 'Use gridded surface data instead of site data', action="store_true") parser.add_option("--include_nonveg", dest="include_nonveg", default=False, \ help = 'Include non-vegetated columns/Landunits in surface data') parser.add_option("--trans2", dest="trans2", default=False, action="store_true", \ help = 'Tranisnent phase 2 (1901-2010) - CRUNCEP only') parser.add_option("--spinup_vars", dest="spinup_vars", default=False, \ help = 'Limit output vars in spinup runs', action="store_true") parser.add_option("--trans_varlist", dest = "trans_varlist", default='', help = "Transient outputs") parser.add_option("--c_only", dest="c_only", default=False, \ help="Carbon only (supplemental P and N)", action="store_true") parser.add_option("--cn_only", dest="cn_only", default=False, \ help = 'Carbon/Nitrogen only (supplemental P)', action="store_true") parser.add_option("--cp_only", dest="cp_only", default=False, \ help = 'Carbon/Phosphorus only (supplemental N)', action = "store_true") parser.add_option("--ensemble_file", dest="ensemble_file", default='', \ help = 'Parameter sample file to generate ensemble') parser.add_option("--mc_ensemble", dest="mc_ensemble", default=-1, \ help = 'Monte Carlo ensemble (argument is # of simulations)') parser.add_option("--ensemble_nocopy", dest="ensemble_nocopy", default=False, \ help = 'Do not copy files to ensemble directories', action="store_true") parser.add_option("--surffile", dest="surffile", default="", \ help = 'Surface file to use') parser.add_option("--domainfile", dest="domainfile", default="", \ help = 'Domain file to use') parser.add_option("--fates_hydro", dest="fates_hydro", default=False, action="store_true", \ help = 'Set fates hydro to true') parser.add_option("--fates_paramfile", dest="fates_paramfile", default="", \ help = 'Fates parameter file to use') parser.add_option("--var_soilthickness", dest="var_soilthickness", default=False, \ help = 'Use variable soil thickness from surface data', action="store_true") parser.add_option("--add_temperature", dest="addt", default=0.0, \ help = 'Temperature to add to atmospheric forcing') parser.add_option("--add_co2", dest="addco2", default=0.0, \ help = 'CO2 (ppmv) to add to atmospheric forcing') parser.add_option("--startdate_add_temperature", dest="sd_addt", default="99991231", \ help = 'Date (YYYYMMDD) to begin addding temperature') parser.add_option("--startdate_add_co2", dest="sd_addco2", default="99991231", \ help = 'Date (YYYYMMDD) to begin addding CO2') #Changed by Ming for mesabi parser.add_option("--archiveroot", dest="archiveroot", default='', \ help = "archive root directory only for mesabi") #Added by Kirk to include the modified parameter file parser.add_option("--mod_parm_file", dest="mod_parm_file", default='', \ help = "adding the path to the modified parameter file") parser.add_option("--mod_parm_file_P", dest="mod_parm_file_P", default='', \ help = "adding the path to
# by default, all! # Ensure column names are unicode df.columns = [unicode(cleanup_name(x, normalize=False, clean_nonletters=False), errors='ignore') for x in df.columns] # By default, take all columns cols = df.columns # Calculate total number of items if progress_bar: pbar_total = 0 for col in cols: pbar_total += len(df[col].index) # Main loop if progress_bar: pbar = _tqdm(total=pbar_total, desc='UNICODE', disable=(not progress_bar)) for col in cols: for idx in df[col].index: # Unidecode if value is a string if isinstance(df.loc[idx,col], str): # if item is already unicode, we don't need to do anything (also if item is null or another type, we do not need to decode) df.loc[idx,col] = string_to_unicode(df.loc[idx,col], failsafe_encoding=failsafe_encoding, skip_errors=skip_errors) if progress_bar: pbar.update() # Restore index if idxbak: df.set_index(idxbak, inplace=True) return df def df_to_unicode_fast(df_in, cols=None, replace_ascii=False, skip_errors=False, progress_bar=False): """Ensure unicode encoding for all strings in the specified columns of a dataframe in a fast way, and optionally by replacing non recognized characters by ascii equivalents. Also ensures that columns names are correctly decodable as unicode if cols=None. If cols=None, will walk through all columns. If replace_ascii, will replace special characters with the closest ASCII counterpart (using unidecode) if the conversion to unicode fails If skip_errors=True, the unicode encoding will be forced by skipping undecodable characters (errors='ignore'). The main difference with df_to_unicode() is that the former tries to maintain special characters (instead of replacing them with their closest ascii counterpart) and it is slower (but more thorough, it should not miss any field, whereas the fast version will work column by column and thus might miss a column of mixed types). """ # Make a copy to avoid tampering the original df = df_in.copy() # If there is a complex index, it might contain strings, so we reset it as columns so that we can unidecode indices too, and we will restore the indices at the end if df.index.dtype.name == 'object' or isinstance(df.index, pd.core.indexes.multi.MultiIndex): idxbak = df.index.names df.reset_index(inplace=True) else: idxbak = None # Which columns do we have to unidecode? if cols is None: # by default, all! 
# Ensure column names are unicode df.columns = [unicode(cleanup_name(x, normalize=False, clean_nonletters=False), errors='ignore') for x in df.columns] # Use the new column names cols = df.columns if skip_errors: serrors = 'ignore' else: serrors = 'strict' for col in _tqdm(cols, desc='UNICODE', disable=(not progress_bar)): # Verify that the column is of type object, else for sure it is not a string # also if there are duplicate names, just skip these columns # TODO: try to process columns with duplicate names if (len(df.loc[:, col].shape) > 1 and df.loc[:, col].shape[1] > 1) or df.loc[:, col].dtype.name != 'object': continue try: # First try a decoding by detecting the correct encoding #encoding = chardet.detect(''.join(df.loc[:, col]))['encoding'] allvals = (x if isinstance(x, basestring) else str(x) for _, x in df.loc[:, col].items()) allvalsjoined = ''.join(allvals) if isinstance(allvalsjoined, unicode): # if unicode, skip decoding encoding = None else: encoding = chardet.detect(allvalsjoined)['encoding'] if encoding: df.loc[:, col] = df.loc[:, col].apply(lambda x: x.decode(encoding, errors=serrors) if isinstance(x, str) else x) #df.loc[:, col] = df.loc[:, col].astype('unicode') # DEPRECATED: works but if we do this, all null values (nan, nat, etc) will be converted to strings and become very difficult to process (eg, not detectable using pd.isnull())! #df[col] = df[col].map(lambda x: x.encode('unicode-escape').decode('utf-8')) except Exception as exc: try: # If decoding failed, we can try to replace the special characters with their closest ASCII counterpart (via unidecode) if replace_ascii: df.loc[:, col] = df.loc[:, col].apply(lambda x: unicode(cleanup_name(x, normalize=False, clean_nonletters=False), errors=serrors) if isinstance(x, str) else x) #df.loc[:, col] = df.loc[:, col].astype('unicode') # DEPRECATED: works but if we do this, all null values (nan, nat, etc) will be converted to strings and become very difficult to process (eg, not detectable using pd.isnull())! else: raise except Exception as exc: # Else everything failed! if skip_errors: pass else: print('Failed with column: %s' % col) raise # Restore index if idxbak: df.set_index(idxbak, inplace=True) return df def df_encode(df_in, cols=None, encoding='utf-8', skip_errors=False, decode_if_errors=False, progress_bar=False): """Encode all unicode strings in a dataframe into a string of the chosen encoding. When decode_if_errors is True, if a string (str) is found, an attempt will be made to decode it using an autodetection of the encoding, to make a unicode sandwich.""" # Make a copy to avoid tampering the original df = df_in.copy() # If there is a complex index, it might contain strings, so we reset it as columns so that we can unidecode indices too, and we will restore the indices at the end if df.index.dtype.name == 'object' or isinstance(df.index, pd.core.indexes.multi.MultiIndex): idxbak = df.index.names df.reset_index(inplace=True) else: idxbak = None # Which columns do we have to unidecode? if cols is None: # by default, all! 
# Ensure column names are encoded df.columns = [x.encode(encoding) for x in df.columns] # Use all columns, and the new column names encoded cols = df.columns # Calculate total number of items if progress_bar: pbar_total = 0 for col in cols: pbar_total += len(df[col].index) # Main loop if progress_bar: pbar = _tqdm(total=pbar_total, desc='UNICODE', disable=(not progress_bar)) for col in cols: for idx in df[col].index: # Unidecode if value is a string if isinstance(df.loc[idx,col], (basestring, unicode)): try: # Try to encode to utf-8, but only if it is unicode if isinstance(df.loc[idx,col], unicode): df.loc[idx,col] = df.loc[idx,col].encode(encoding) elif decode_if_errors: df.loc[idx,col] = string_to_unicode(df.loc[idx,col]).encode(encoding) else: raise ValueError('Error at column "%s" index %s: not unicode!') except UnicodeDecodeError as exc: # At worst, try unidecode if skip_errors: df.loc[idx,col] = _unidecode(df.loc[idx,col]).encode(encoding) else: print('Error at column "%s" index %s' % (col, str(idx))) raise if progress_bar: pbar.update() # Restore index if idxbak: df.set_index(idxbak, inplace=True) return df def df_literal_eval(xin, aggressive=False): """Evaluate each string cell of a DataFrame as if it was a Python object, and return the Python object aggressive=True makes the algorithm remove any list separator beforehand to separate the contained items to the maximum, this allows working with nested lists for instance""" if aggressive: x = str(xin).replace('[', '').replace(']', '').replace('{', '').replace('}', '').replace("'", '') else: x = xin try: # Try to evaluate using ast return(ast.literal_eval(x)) except (SyntaxError, ValueError): try: # Else evaluate as a list without quotes if not ((x.startswith('[') or x.startswith('{')) and (x.endswith(']') or x.endswith('}'))): raise Exception() return re.split(',\s*u?', re.sub('[\[\]\{\}]', '', x)) # TODO: implement a real parser using pyparser: https://stackoverflow.com/a/1894785 except Exception as exc: # Else simply return the item as-is return xin def df_cols_lower(df_in, col='name'): """Find in a DataFrame any column matching the col argument in lowercase and rename all found columns to lowercase""" # Make a copy to avoid tampering the original df = df_in.copy() # Find and rename any column "Name" or "NAME" to lowercase "name" namecols = df.columns[[True if x.lower() == col.lower() else False for x in df.columns]] if len(namecols) > 0: df = df.rename(columns={x: x.lower() for x in namecols}) return df def date_fr2en(s): """Convert french month names into english so that dateutil.parse works""" if isinstance(s, basestring): s = s.lower() rep = { 'jan\w+': 'jan', 'fe\w+': 'feb', 'mar\w+': 'march', 'av\w+': 'april', 'mai\w+': 'may', 'juin\w+': 'june', 'juil\w+': 'july', 'ao\w+': 'august', 'se\w+': 'september', 'oc\w+': 'october', 'no\w+': 'november', 'de\w+': 'december', } for m, r in rep.items(): s = re.sub(m, r, s) return s def date_cleanchar(s): """Clean a date from any non useful character (else dateutil_parser will fail, eg with a '?')""" if isinstance(s, basestring): s = s.lower() res = re.findall('[\d/-:\s]+', s, re.UNICODE) if res: return '-'.join(res) else: return None else: return s def date_clean(s): """Clean the provided string and parse as a date using dateutil.parser fuzzy matching (alternative to pd.to_datetime()). 
    Should be used with df[col].apply(df_date_clean)."""
    if pd.isnull(s):
        return None
    else:
        # Clean non-date characters (they might choke the date parser); convert to
        # str first so that a datetime object does not raise an error
        cleaned_date = date_fr2en(date_cleanchar(str(s)))
        if not cleaned_date:
            return None
        else:
            try:
                # First try ISO date parsing; this circumvents bad decoding when the
                # month is in the middle, see: https://github.com/dateutil/dateutil/pull/340
                return dateutil_parser.isoparse(cleaned_date)
            except ValueError
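A short usage sketch for the date helpers above, assuming a pandas DataFrame with a made-up column of messy date strings and that the module-level imports used by date_clean() (pandas as pd, re, dateutil.parser as dateutil_parser) are in place:

import pandas as pd

# Hypothetical data; date_clean() is the helper defined above.
df = pd.DataFrame({'event_date': ['2016-07-01', '12/05/2014 ?', None]})

# The '?' is stripped by date_cleanchar() before parsing; None stays None.
df['event_date_parsed'] = df['event_date'].apply(date_clean)
print(df['event_date_parsed'])

The ISO-formatted value goes through dateutil_parser.isoparse(); what happens to strings that fail ISO parsing depends on the fallback branch that is truncated in the excerpt above.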
# -*- coding: utf-8 -*- import logging import functools from collections import OrderedDict from fabric2.runners import Result from django.contrib.auth.models import Permission from django.contrib.contenttypes.models import ContentType from django.utils.text import camel_case_to_spaces from django.dispatch import Signal from .models import Host from .models import MinkeModel from .models import MinkeSession from .models import BaseMessage from .forms import CommandForm from .messages import PreMessage from .messages import TableMessage from .messages import ExecutionMessage from .exceptions import InvalidMinkeSetup from .exceptions import SessionRegistrationError from .utils import FormatDict logger = logging.getLogger(__name__) class RegistryDict(OrderedDict): """ A reload-able session-registry. """ reload_sessions = Signal(providing_args=['session_name']) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._static_sessions = None def reload(self, session_name=None): """ Load dynamical sessions into the registry. Reset the registry to the static sessions. Then send a reload-signal. """ # We backup the static-sessions and reset the registry before each # reload. This way the reload algorithms doesn't have to unregister # obsolete sessions. if self._static_sessions: self.clear() self.update(self._static_sessions) else: self._static_sessions = self.copy() # no reloading needed for static sessions if session_name and session_name in self: return # trigger the reload signal self.reload_sessions.send(sender=self.__class__, session_name=session_name) REGISTRY = RegistryDict() class SessionGroup: """ A group of session - displayed as optgroups in the select-widget. Create a group and use it as decorator for Sessions:: my_group = SessionGroup('My Group') @my_group class MySession(Session): pass """ def __init__(self, name=None): self.name = name def __call__(self, cls, name=None): cls.group = name or self.name return cls class SessionRegistration(type): """ metaclass for Sessions that implements session-registration """ def __new__(cls, name, bases, dct): # Setting the abstract-attr explicitly avoids its inheritance. dct['abstract'] = dct.get('abstract', False) return super().__new__(cls, name, bases, dct) def __init__(cls, classname, bases, attrs): super().__init__(classname, bases, attrs) if not cls.abstract: cls.register() if cls.auto_permission: cls.add_permission() def register(cls): """ Register the session-class. """ # some sanity-checks if cls.__name__ in REGISTRY: msg = 'A session with that name was already registered.' raise SessionRegistrationError(cls, msg) if not cls.work_on: msg = 'At least one minke-model must be specified.' raise SessionRegistrationError(cls, msg) for model in cls.work_on: try: assert(model == Host or issubclass(model, MinkeModel)) except (TypeError, AssertionError): msg = '{} is no minke-model.'.format(model) raise SessionRegistrationError(cls, msg) if issubclass(cls, SingleCommandSession) and not cls.command: msg = 'SingleCommandSession needs to specify an command.' raise SessionRegistrationError(cls, msg) if issubclass(cls, CommandChainSession) and not cls.commands: msg = 'CommandChainSession needs to specify commands.' raise SessionRegistrationError(cls, msg) # TODO: Check for recursion in SessionChains # TODO: Check SessionChain's sessions if issubclass(cls, SessionChain) and not cls.sessions: msg = 'SessionChain needs to specify sessions.' 
raise SessionRegistrationError(cls, msg) # set verbose-name if missing if not cls.verbose_name: cls.verbose_name = camel_case_to_spaces(cls.__name__) # register session REGISTRY[cls.__name__] = cls def _get_permission(cls): codename = 'run_{}'.format(cls.__name__.lower()) name = 'Can run {}'.format(cls.__name__) lookup = 'minke.{}'.format(codename) return codename, name, lookup def create_permission(cls): """ Create a run-permission for this session-class. """ content_type = ContentType.objects.get_for_model(MinkeSession) codename, name, lookup = cls._get_permission() permission, created = Permission.objects.update_or_create( codename=codename, content_type=content_type, defaults=dict(name=name)) return permission, created def delete_permission(cls): codename, name, lookup = cls._get_permission() try: Permission.objects.get(codename=codename).delete() except Permission.DoesNotExist: pass else: cls.permissions = tuple(set(cls.permissions) - set((lookup,))) def add_permission(cls): codename, name, lookup = cls._get_permission() cls.permissions = tuple(set(cls.permissions) | set((lookup,))) def protect(method): """ Decorator for session-methods to defer their interrupt. """ @functools.wraps(method) def wrapper(obj, *args, **kwargs): # are we already protected? if obj._busy: return method(obj, *args, **kwargs) # otherwise protect the method-call by setting the busy-flag obj._busy = True result = method(obj, *args, **kwargs) # if interruption was deferred now is the time to raise it if obj._stopped: raise KeyboardInterrupt obj._busy = False return result return wrapper class Session(metaclass=SessionRegistration): """Base-class for all session-classes. All session-classes must inherit from Session. By defining a subclass of Session the subclass will be implicitly registered as session-class and also a run-permission will be created for it. To prevent this behavior use an abstract session by setting :attr:`.abstract` to True. Each session will be instantiated with a fabric-:doc:`fabric:api/connection` and an object of :class:`~.models.MinkeSession`. The connection-object provides the remote-access, while the minkesession is the database- representation of a specific session running for a specific :class:`minkemodel-object <.models.MinkeModel>`. For a session-class to be useful you at least has to define the :meth:`.process`-method and add one or more :class:`~.models.MinkeModel` to :attr:`.work_on`-attribute. """ abstract = True """ An abstract session-class won't be registered itself. This is useful if your session-class should be a base-class for other sessions. Abstract session-classes can be registered manually by calling its classmethod :meth:`~.SessionRegistration.register`:: MySession.register() This won't add a run-permission-lookup-string to :attr:`.permissions`. To do so use the classmethod :meth:`.SessionRegistration.add_permission`:: MySession.add_permission() """ verbose_name = None """Display-name for sessions.""" group = None """ Group-name used as optgroup-tag in the select-widget. Best practice to group sessions is to use a :class:`.SessionGroup`. """ work_on = tuple() """Tuple of minke-models. Models the session can be used with.""" permissions = tuple() """ Tuple of permission-strings. To be able to run a session a user must have all the permissions listed. The strings should have the following format: "<app-label>.<permission's-codename>. 
""" auto_permission = True """ If True a lookup-string for a session-specific run-permission will be automatically added to :attr:`.permissions`. Note ---- To avoid database-access on module-level we won't create the permission itself. Once you setup your sessions you could create run-permissions for all sessions using the api-command:: $ ./manage.py minkeadm --create-permissions """ # TODO: Make this a tuple or list of forms to render. form = None """ An optional form that will be rendered before the session will be processed. The form-data will be accessible within the session as the data-property. Use it if the session's processing depends on additional user-input-data. Instead of setting the form-attribute you can also directly overwrite :meth:`.get_form`. """ confirm = False """ If confirm is true, the admin-site asks for a user-confirmation before processing a session, which also allows to review the objects the session was revoked with. """ # TODO: This should be renamed to something like context. invoke_config = dict() """ Session-specific fabric- and invoke-configuration-parameters which will be used to initialize a :class:`fabric-connection <fabric.connection.Connection>`. The keys must be formatted in a way that is accepted by :meth:`~.fabrictools.FabricConfig.load_snakeconfig`. See also the documentation for the configuration of :doc:`fabric <fabric:concepts/configuration>` and :doc:`invoke <invoke:concepts/configuration>`. """ parrallel_per_host = False """ Allow parrallel processing of multiple celery-tasks on a single host. If multiple minke-objects are associated with the same host all tasks running on them would be processed in a serial manner by default. This is to protect the ressources of the host-system. If you want to allow parrallel processing of multiple celery-tasks on a single host set parrallel_per_host to True. Note ---- To perform parrallel task-execution on a single host we make use of celery's chords-primitive, which needs a functioning result-backend to be configured. Please see the :ref:`celery-documentation <chord-important-notes>` for more details. """ def __init__(self, con, db, minkeobj=None): """Session's init-method. Parameters ---------- con : obj of :class:`fabric.connection.Connection` db : obj of :class:`~.models.MinkeSession` minkeobj : obj of :class:`~.models.Minkeobj` (optional) Only required if you want to initialize a session out of another session and let it work on a different minkeobj. """ # TODO: Update the connection dict with the minkeobj.data. # Maybe use a prefix on form fields for context data. self._c = con self._db = db self._minkeobj = minkeobj self._stopped = False self._busy = False self.start = db.start self.end = db.end @classmethod def get_form(cls): """ Return :attr:`.form` by default. Overwrite this method if you need to setup your form-class dynamically. """ return cls.form @property def c(self): """ Refers to the :class:`fabric.connection.Connection`-object the session was initialized with. """ return self._c @property def minkeobj(self): """ Refers to :attr:`.models.MinkeSession.minkeobj`. """ return self._minkeobj or self._db.minkeobj @property def status(self): """ Refers to :attr:`.models.MinkeSession.session_status`. """ return self._db.session_status @property def data(self): """ Refers to :attr:`.models.MinkeSession.session_data`. This model-field holds all the data that comes from :attr:`.form`. """ return self._db.session_data def stop(self, *arg, **kwargs): """Interrupt the session's processing. 
This method can be called twice. The first call initiates a soft interruption: a currently running remote process is not interrupted, and the session is stopped afterwards. If the method is called a second time in the meantime, the session is killed immediately. Note ----
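To make the registration machinery concrete, here is a minimal, hypothetical session built on the classes above. The class name, group label and shell command are invented; the attributes (verbose_name, work_on, group), the SessionGroup decorator and the implicit registration through the SessionRegistration metaclass follow the code shown.

# A sketch of a concrete session, assuming Host and the classes above are
# importable from this module. process() is the per-object hook the base
# class docstring says every useful session must define.
diagnostics = SessionGroup('Diagnostics')

@diagnostics
class UptimeSession(Session):
    verbose_name = 'Check uptime'
    work_on = (Host,)

    def process(self):
        # self.c is the fabric connection the session was initialized with
        result = self.c.run('uptime', hide=True)
        return result.stdout.strip()

Defining the subclass is enough to register it and to add a run_uptimesession lookup to its permissions tuple; creating the Permission object itself is deferred to create_permission(), as the auto_permission docstring above explains.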
fetch_subtract_a(self): data = self.fetch() # 1 cycle self.compare_a_simple(data) # 1 cycle self.a.sub(data, False) def compare_a(self, getCaller, setCaller=None): # 1 cycle self.compare_a_simple(int(self.a.get() - getCaller.get())) def compare_a_simple(self, s): s = s & 0xFF self.f.reset() self.f.n_flag = True self.f.z_flag_compare(s) self.hc_flag_finish(s) self.cycles -= 1 def hc_flag_finish(self, data): if data > self.a.get(): self.f.c_flag = True self.f.h_flag_compare(self.a.get(), data) def AND(self, getCaller, setCaller=None): # 1 cycle self.a.set(self.a.get() & getCaller.get()) # 1 cycle self.f.z_flag_compare(self.a.get(), reset=True) def XOR(self, getCaller, setCaller=None): # 1 cycle self.a.set( self.a.get() ^ getCaller.get()) # 1 cycle self.f.z_flag_compare(self.a.get(), reset=True) def OR(self, getCaller, setCaller=None): # 1 cycle self.a.set(self.a.get() | getCaller.get()) # 1 cycle self.f.z_flag_compare(self.a.get(), reset=True) def inc_double_register(self, doubleRegister): doubleRegister.inc() def dec_double_register(self, doubleRegister): doubleRegister.dec() def inc(self, getCaller, setCaller): # 1 cycle data = (getCaller.get() + 1) & 0xFF self.dec_inc_flag_finish(data, setCaller, 0x00) def dec(self, getCaller, setCaller): # 1 cycle data = (getCaller.get() - 1) & 0xFF self.dec_inc_flag_finish(data, setCaller, 0x0F) self.f.n_flag = True def dec_inc_flag_finish(self, data, setCaller, compare): self.f.partial_reset(keep_c=True) self.f.z_flag_compare(data) if (data & 0x0F) == compare: self.f.h_flag = True setCaller.set(data) # 1 cycle def rotate_left_circular(self, getCaller, setCaller): # RLC 1 cycle data = getCaller.get() s = (data << 1) + (data >> 7) self.flags_and_setter_finish(s, setCaller, 0x80) #self.cycles -= 1 def rotate_left_circular_a(self): # RLCA rotate_left_circular_a 1 cycle self.rotate_left_circular(RegisterCallWrapper(self.a), \ RegisterCallWrapper(self.a)) def rotate_left(self, getCaller, setCaller): # 1 cycle s = (getCaller.get() << 1) & 0xFF if self.f.c_flag: s += 0x01 self.flags_and_setter_finish(s, setCaller, 0x80) # 1 cycle def rotate_left_a(self): # RLA 1 cycle self.rotate_left(RegisterCallWrapper(self.a), \ RegisterCallWrapper(self.a)) def rotate_right_circular(self, getCaller, setCaller): data = getCaller.get() # RRC 1 cycle s = (data >> 1) + ((data & 0x01) << 7) self.flags_and_setter_finish(s, setCaller) # 1 cycle def rotate_right_circular_a(self): # RRCA 1 cycle self.rotate_right_circular(RegisterCallWrapper(self.a), \ RegisterCallWrapper(self.a)) def rotate_right(self, getCaller, setCaller): # 1 cycle s = (getCaller.get() >> 1) if self.f.c_flag: s += 0x08 self.flags_and_setter_finish(s, setCaller) # 1 cycle def rotate_right_a(self): # RRA 1 cycle self.rotate_right(RegisterCallWrapper(self.a), \ RegisterCallWrapper(self.a)) def shift_left_arithmetic(self, getCaller, setCaller): # 2 cycles s = (getCaller.get() << 1) & 0xFF self.flags_and_setter_finish(s, setCaller, 0x80) # 1 cycle def shift_right_arithmetic(self, getCaller, setCaller): data = getCaller.get() # 1 cycle s = (data >> 1) + (data & 0x80) self.flags_and_setter_finish(s, setCaller) # 1 cycle def shift_word_right_logical(self, getCaller, setCaller): # 2 cycles s = (getCaller.get() >> 1) self.flags_and_setter_finish(s, setCaller) # 2 cycles def flags_and_setter_finish(self, s, setCaller, compare_and=0x01): # 2 cycles s &= 0xFF self.f.z_flag_compare(s, reset=True) self.f.c_flag_add(s, compare_and) setCaller.set(s) # 1 cycle def swap(self, getCaller, setCaller): data = getCaller.get() # 1 cycle 
s = ((data << 4) + (data >> 4)) & 0xFF self.f.z_flag_compare(s, reset=True) setCaller.set(s) def test_bit(self, getCaller, setCaller, n): # 2 cycles self.f.partial_reset(keep_c=True) self.f.h_flag = True self.f.z_flag = False if (getCaller.get() & (1 << n)) == 0: self.f.z_flag = True self.cycles -= 1 def set_bit(self, getCaller, setCaller, n): # 1 cycle setCaller.set(getCaller.get() | (1 << n)) # 1 cycle def reset_bit(self, getCaller, setCaller, n): # 1 cycle setCaller.set(getCaller.get() & (~(1 << n))) # 1 cycle def store_fetched_memory_in_a(self): # LD A,(nnnn), 4 cycles self.a.set(self.read(self.fetch_double_address())) # 1+1 + 2 cycles def write_a_at_bc_address(self): # 2 cycles self.write(self.bc.get(), self.a.get()) def write_a_at_de_address(self): self.write(self.de.get(), self.a.get()) def store_memory_at_bc_in_a(self): self.a.set(self.read(self.bc.get())) def store_memory_at_de_in_a(self): self.a.set(self.read(self.de.get())) def ld_dbRegisteri_A(self, register): # LD (rr),A 2 cycles self.write(register.get(), self.a.get()) # 2 cycles def load_mem_sp(self): # LD (nnnn),SP 5 cycles address = self.fetch_double_address() # 2 cycles self.write(address, self.sp.get_lo()) # 2 cycles self.write((address + 1), self.sp.get_hi()) # 2 cycles self.cycles += 1 def store_a_at_fetched_address(self): # LD (nnnn),A 4 cycles self.write(self.fetch_double_address(), self.a.get()) # 2 cycles def store_memory_at_axpanded_fetch_address_in_a(self): # LDH A,(nn) 3 cycles self.a.set(self.read(0xFF00 + self.fetch())) # 1+1+1 cycles def store_expanded_c_in_a(self): # LDH A,(C) 2 cycles self.a.set(self.read(0xFF00 + self.bc.get_lo())) # 1+2 cycles def load_and_increment_a_hli(self): # loadAndIncrement A,(HL) 2 cycles self.a.set(self.read(self.hl.get())) # 2 cycles self.hl.inc()# 2 cycles self.cycles += 2 def load_and_decrement_a_hli(self): # loadAndDecrement A,(HL) 2 cycles self.a.set(self.read(self.hl.get())) # 2 cycles self.hl.dec() # 2 cycles self.cycles += 2 def write_a_at_expanded_fetch_address(self): # LDH (nn),A 3 cycles self.write(0xFF00 + self.fetch(), self.a.get()) # 2 + 1 cycles def write_a_at_expaded_c_address(self): # LDH (C),A 2 cycles self.write(0xFF00 + self.bc.get_lo(), self.a.get()) # 2 cycles def load_and_increment_hli_a(self): # loadAndIncrement (HL),A 2 cycles self.write(self.hl.get(), self.a.get()) # 2 cycles self.hl.inc() # 2 cycles self.cycles += 2 def load_and_decrement_hli_a(self): # loadAndDecrement (HL),A 2 cycles self.write(self.hl.get(), self.a.get()) # 2 cycles self.hl.dec() # 2 cycles self.cycles += 2 def store_hl_in_sp(self): # LD SP,HL 2 cycles self.sp.set(self.hl.get()) # 1 cycle self.cycles -= 1 def complement_a(self): # CPA self.a.set(self.a.get() ^ 0xFF) self.f.n_flag = True self.f.h_flag = True def decimal_adjust_accumulator(self): # DAA 1 cycle delta = 0 if self.is_h(): delta |= 0x06 if self.is_c(): delta |= 0x60 if (self.a.get() & 0x0F) > 0x09: delta |= 0x06 if (self.a.get() & 0xF0) > 0x80: delta |= 0x60 if (self.a.get() & 0xF0) > 0x90: delta |= 0x60 if not self.is_n(): self.a.set((self.a.get() + delta) & 0xFF) # 1 cycle else: self.a.set((self.a.get() - delta) & 0xFF) # 1 cycle self.f.partial_reset(keep_n=True) if delta >= 0x60: self.f.c_flag = True self.f.z_flag_compare(self.a.get()) def inc_double_register(self, register): # INC rr register.inc() def dec_double_register(self, register): # DEC rr register.dec() def increment_sp_by_fetch(self): # ADD SP,nn 4 cycles self.sp.set(self.get_fetchadded_sp()) # 1+1 cycle self.cycles -= 2 def store_fetch_added_sp_in_hl(self): 
# LD HL,SP+nn 3 cycles self.hl.set(self.get_fetchadded_sp()) # 1+1 cycle self.cycles -= 1 def get_fetchadded_sp(self): # 1 cycle offset = self.fetch() # 1 cycle s = (self.sp.get() + offset) & 0xFFFF self.f.reset() if (offset >= 0): if s < self.sp.get(): self.f.c_flag = True if (s & 0x0F00) < (self.sp.get() & 0x0F00): self.f.h_flag = True else: if s > self.sp.get(): self.f.c_flag = True if (s & 0x0F00) > (self.sp.get() & 0x0F00): self.f.h_flag = True return s def complement_carry_flag(self): # CCF/SCF self.f.partial_reset(keep_z=True, keep_c=True) self.f.c_flag = not self.f.c_flag def set_carry_flag(self): self.f.partial_reset(keep_z=True) self.f.c_flag = True def nop(self): # NOP 1 cycle self.cycles -= 1 def unconditional_jump(self): # JP nnnn, 4 cycles self.pc.set(self.fetch_double_address()) # 1+2 cycles self.cycles -= 1 def conditional_jump(self, cc): # JP cc,nnnn 3,4 cycles if cc: self.unconditional_jump() # 4 cycles else: self.pc.add(2) # 3 cycles def relative_unconditional_jump(self): # JR +nn, 3 cycles self.pc.add(self.fetch()) # 3 + 1 cycles self.cycles += 1 def relative_conditional_jump(self, cc): # JR cc,+nn, 2,3 cycles if cc: self.relative_unconditional_jump() # 3 cycles else: self.pc.inc() # 2 cycles def unconditional_call(self): # CALL nnnn, 6 cycles self.call(self.fetch_double_address()) # 4+2 cycles def conditional_call(self, cc): # CALL cc,nnnn, 3,6 cycles if cc: self.unconditional_call() # 6 cycles else: self.pc.add(2) # 3 cycles def ret(self): # RET 4 cycles lo = self.pop() # 1 cycle hi = self.pop() # 1 cycle self.pc.set_hi_lo(hi, lo) # 2 cycles def conditional_return(self, cc): # RET cc 2,5 cycles if cc: self.ret() # 4 cycles # FIXME maybe this should be the same self.cycles -= 1 else: self.cycles -= 2 def return_form_interrupt(self): # RETI 4 cycles self.ret() # 4 cycles self.enable_interrupts() # 1 cycle + others self.cycles += 1 def restart(self, nn): # RST nn 4 cycles self.call(nn) # 4 cycles def disable_interrups(self): # DI/EI 1 cycle self.ime = False self.cycles -= 1 def enable_interrupts(self): # 1 cycle self.ime = True self.execute(self.fetch()) # 1 self.handle_pending_interrupt() def halt(self): # HALT/STOP self.halted = True # emulate bug when interrupts are pending if not self.ime and self.interrupt.is_pending(): self.execute(self.memory.read(self.pc.get())) self.handle_pending_interrupt() def stop(self): # 0 cycles self.cycles += 1 self.fetch() # ------------------------------------------------------------------------------ class CallWrapper(object): def get(self, use_cycles=True): raise Exception("called CalLWrapper.get") return 0 def set(self, value, use_cycles=True): raise Exception("called CalLWrapper.set") pass class RegisterCallWrapper(CallWrapper): def __init__(self, register): self.register = register def get(self, use_cycles=True): return self.register.get(use_cycles) def set(self, value, use_cycles=True): return self.register.set(value, use_cycles) class DoubleRegisterCallWrapper(CallWrapper): def __init__(self, register): self.register = register def get(self, use_cycles=True): return self.register.get(use_cycles) def set(self, value, use_cycles=True): return self.register.set(value, use_cycles) class CPUPopCaller(CallWrapper): def __init__(self, cpu): self.cpu = cpu def get(self, use_cycles=True): return self.cpu.pop(use_cycles) class CPUFetchCaller(CallWrapper): def __init__(self, cpu): self.cpu = cpu def get(self, use_cycles=True): return self.cpu.fetch(use_cycles) # ------------------------------------------------------------------------------ 
# OPCODE LOOKUP TABLE GENERATION -----------------------------------------------
GROUPED_REGISTERS = [CPU.get_b, CPU.get_c, CPU.get_d, CPU.get_e,
                     CPU.get_h, CPU.get_l, CPU.get_hli, CPU.get_a]
def
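The CallWrapper family above exists so the same arithmetic and shift helpers can read and write registers, popped stack values or fetched immediates through one get/set interface. A toy, self-contained illustration of that pattern follows; the Register class here is invented and far simpler than the emulator's real registers.

# Toy illustration of the get/set-wrapper pattern used above.
class Register:
    def __init__(self, value=0):
        self.value = value
    def get(self, use_cycles=True):
        return self.value
    def set(self, value, use_cycles=True):
        self.value = value & 0xFF

class RegisterCallWrapper:
    def __init__(self, register):
        self.register = register
    def get(self, use_cycles=True):
        return self.register.get(use_cycles)
    def set(self, value, use_cycles=True):
        return self.register.set(value, use_cycles)

def swap_nibbles(get_caller, set_caller):
    # Same idea as CPU.swap() above, minus the flag handling.
    data = get_caller.get()
    set_caller.set(((data << 4) + (data >> 4)) & 0xFF)

a = Register(0xF1)
wrapper = RegisterCallWrapper(a)
swap_nibbles(wrapper, wrapper)
print(hex(a.get()))  # 0x1f

In the emulator, the same swap/rotate/shift helpers are reused for every operand type simply by passing a different wrapper, which is presumably what the opcode lookup table generation started below builds on.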
#!/usr/bin/env python # -*- coding: utf-8 -*- # # fsfs-reshard.py REPOS_PATH MAX_FILES_PER_SHARD # # Perform an offline conversion of an FSFS repository between linear (format # 2, usable by Subversion 1.4+) and sharded (format 3, usable by Subversion # 1.5+) layouts. # # The MAX_FILES_PER_SHARD argument specifies the maximum number of files # that will be stored in each shard (directory), or zero to specify a linear # layout. Subversion 1.5 uses a default value of 1000 files per shard. # # As the repository will not be valid while the conversion is in progress, # the repository administrator must ensure that access to the repository is # blocked for the duration of the conversion. # # In the event that the conversion is interrupted, the repository will be in # an inconsistent state. The repository administrator should then re-run # this tool to completion. # # # Note that, currently, resharding from one sharded layout to another is # likely to be an extremely slow process. To reshard, we convert from a # sharded to linear layout and then to the new sharded layout. The problem # is that the initial conversion to the linear layout triggers exactly the # same 'large number of files in a directory' problem that sharding is # intended to solve. # # ==================================================================== # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # ==================================================================== # # $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.8.x/tools/server-side/fsfs-reshard.py $ # $LastChangedDate: 2009-11-16 19:07:17 +0000 (Mon, 16 Nov 2009) $ # $LastChangedBy: hwright $ # $LastChangedRevision: 880911 $ import os, stat, sys from errno import EEXIST def usage(): """Print a usage message and exit.""" print("""usage: %s REPOS_PATH MAX_FILES_PER_SHARD [START END] Perform an offline conversion of an FSFS repository between linear (readable by Subversion 1.4 or later) and sharded (readable by Subversion 1.5 or later) layouts. The MAX_FILES_PER_SHARD argument specifies the maximum number of files that will be stored in each shard (directory), or zero to specify a linear layout. Subversion 1.5 uses a default value of 1000 files per shard. Convert revisions START through END inclusive if specified, or all revisions if unspecified. """ % sys.argv[0]) sys.exit(1) def incompatible_repos_format(repos_path, format): """Print an error saying that REPOS_PATH is a repository with an incompatible repository format FORMAT, then exit.""" sys.stderr.write("""error: unable to convert repository '%s'. This repository is not compatible with this tool. Valid repository formats are '3' or '5'; this repository is format '%s'. 
""" % (repos_path, format)) sys.stderr.flush() sys.exit(1) def incompatible_fs_format(repos_path, format): """Print an error saying that REPOS_PATH is a repository with an incompatible filesystem format FORMAT, then exit.""" sys.stderr.write("""error: unable to convert repository '%s'. This repository contains a filesystem that is not compatible with this tool. Valid filesystem formats are '1', '2', or '3'; this repository contains a filesystem with format '%s'. """ % (repos_path, format)) sys.stderr.flush() sys.exit(1) def unexpected_fs_format_options(repos_path): """Print an error saying that REPOS_PATH is a repository with unexpected filesystem format options, then exit.""" sys.stderr.write("""error: unable to convert repository '%s'. This repository contains a filesystem that appears to be invalid - there is unexpected data after the filesystem format number. """ % repos_path) sys.stderr.flush() sys.exit(1) def incompatible_fs_format_option(repos_path, option): """Print an error saying that REPOS_PATH is a repository with an incompatible filesystem format option OPTION, then exit.""" sys.stderr.write("""error: unable to convert repository '%s'. This repository contains a filesystem that is not compatible with this tool. This tool recognises the 'layout' option but the filesystem uses the '%s' option. """ % (repos_path, option)) sys.stderr.flush() sys.exit(1) def warn_about_fs_format_1(repos_path, format_path): """Print a warning saying that REPOS_PATH contains a format 1 FSFS filesystem that we can't reconstruct, then exit.""" sys.stderr.write("""warning: conversion of '%s' will be one-way. This repository is currently readable by Subversion 1.1 or later. This tool can convert this repository to one that is readable by either Subversion 1.4 (or later) or Subversion 1.5 (or later), but it is not able to convert it back to the original format - a separate dump/load step would be required. If you would like to upgrade this repository anyway, delete the file '%s' and re-run this tool. """ % (repos_path, format_path)) sys.stderr.flush() sys.exit(1) def check_repos_format(repos_path): """Check that REPOS_PATH contains a repository with a suitable format; print a message and exit if not.""" format_path = os.path.join(repos_path, 'format') try: format_file = open(format_path) format = format_file.readline() if not format.endswith('\n'): incompatible_repos_format(repos_path, format + ' <missing newline>') format = format.rstrip('\n') if format == '3' or format == '5': pass else: incompatible_repos_format(repos_path, format) except IOError: # In all likelihood, the file doesn't exist. incompatible_repos_format(repos_path, '<unreadable>') def check_fs_format(repos_path): """Check that REPOS_PATH contains a filesystem with a suitable format, or that it contains no format file; print a message and exit if neither is true. Return bool whether the filesystem is sharded.""" sharded = False db_path = os.path.join(repos_path, 'db') format_path = os.path.join(db_path, 'format') try: format_file = open(format_path) format = format_file.readline() if not format.endswith('\n'): incompatible_fs_format(repos_path, format + ' <missing newline>') format = format.rstrip('\n') if format == '1': # This is a format 1 (svndiff0 only) filesystem. We can upgrade it, # but we can't downgrade again (since we can't uncompress any of the # svndiff1 deltas that may have been written). Warn the user and exit. 
warn_about_fs_format_1(repos_path, format_path) if format == '2': pass elif format == '3': pass else: incompatible_fs_format(repos_path, format) for line in format_file: if format == '2': unexpected_fs_format_options(repos_path) line = line.rstrip('\n') if line == 'layout linear': pass elif line.startswith('layout sharded '): sharded = True else: incompatible_fs_format_option(repos_path, line) format_file.close() except IOError: # The format file might not exist if we've previously been interrupted, # or if the user is following our advice about upgrading a format 1 # repository. In both cases, we'll just assume the format was # compatible. pass return sharded def current_file(repos_path): """Return triple of (revision, next_node_id, next_copy_id) from REPOS_PATH/db/current .""" return open(os.path.join(repos_path, 'db', 'current')).readline().split() def remove_fs_format(repos_path): """Remove the filesystem format file for repository REPOS_PATH. Do not raise an error if the file is already missing.""" format_path = os.path.join(repos_path, 'db', 'format') try: statinfo = os.stat(format_path) except OSError: # The file probably doesn't exist. return # On Windows, we need to ensure the file is writable before we can # remove it. os.chmod(format_path, statinfo.st_mode | stat.S_IWUSR) os.remove(format_path) def write_fs_format(repos_path, contents): """Write a new filesystem format file for repository REPOS_PATH containing CONTENTS.""" format_path = os.path.join(repos_path, 'db', 'format') f = open(format_path, 'wb') f.write(contents) f.close() os.chmod(format_path, stat.S_IRUSR | stat.S_IRGRP) def linearise(path): """Move all the files in subdirectories of PATH into PATH, and remove the subdirectories. Handle conflicts between subdirectory names and files contained in subdirectories by ensuring subdirectories have a '.shard' suffix prior to moving (the files are assumed not to have this suffix. Abort if a subdirectory is found to contain another subdirectory.""" # First enumerate all subdirectories of DIR and rename where necessary # to include a .shard suffix. for name in os.listdir(path): if name.endswith('.shard'): continue subdir_path = os.path.join(path, name) if not os.path.isdir(subdir_path): continue os.rename(subdir_path, subdir_path + '.shard') # Now move all the subdirectory contents into the parent and remove # the subdirectories. for root_path, dirnames, filenames in os.walk(path): if root_path == path: continue if len(dirnames) > 0: sys.stderr.write("error: directory '%s' contains other unexpected directories.\n" \ % root_path) sys.stderr.flush() sys.exit(1) for name in filenames: from_path = os.path.join(root_path, name) to_path = os.path.join(path, name) os.rename(from_path, to_path) os.rmdir(root_path) def shard(path, max_files_per_shard, start, end): """Move the files for revisions START to END inclusive in PATH into subdirectories of PATH named such that subdirectory '0' contains at most MAX_FILES_PER_SHARD files, those named [0, MAX_FILES_PER_SHARD). Abort if PATH is found to contain any entries with non-numeric names.""" tmp = path + '.reshard' try: os.mkdir(tmp) except OSError, e: if e.errno != EEXIST: raise # Move all entries into shards named N.shard.
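The body of shard() is truncated above, but its docstring pins down the mapping: revision files go into numbered subdirectories holding at most MAX_FILES_PER_SHARD entries each. A small sketch of that mapping, under the assumption that shard N holds revisions in [N*max, (N+1)*max):

# Illustration only: compute which shard directory a revision belongs to;
# no files are touched. The real shard() also handles the '.reshard'
# temporary directory and aborts on non-numeric entries.
def shard_subdir(rev, max_files_per_shard):
    """Return the shard directory name revision REV belongs to."""
    return str(rev // max_files_per_shard)

for rev in (0, 999, 1000, 2500):
    print('%d -> %s' % (rev, shard_subdir(rev, 1000)))  # 0 and 999 -> '0', 1000 -> '1', 2500 -> '2'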
[] for k in direction} bounding_box = {k: [] for k in direction} for idx, plane in enumerate(direction): slices[plane] = list(range(limits[idx][0], limits[idx][1]+1, 1)) imat[plane], fitvolume[plane] = self.resample_CTplanes(hd_trajectories, plane, lead_model, resolution=.35) span_vector = [sample_width, 0, 0] if plane != 'sag' else [0, sample_width, 0] idx = [0, -1] bounding_box_coords = [] for k in idx: bounding_box_coords.append(hd_trajectories[k,:]-span_vector) bounding_box_coords.append(hd_trajectories[k, :] + span_vector) bounding_box_coords = np.array(bounding_box_coords) axes_name = ['xx', 'yy', 'zz'] box = {k: [] for k in axes_name} for i, dim in enumerate(axes_name): box[dim] = bounding_box_coords.T[i,:].tolist() bounding_box[plane] = box return imat, bounding_box, fitvolume def get_axialplanes(self, marker_coordinates, lead_data, window_size=15, resolution=.5): """returns a plane at a specific window with a certain direction""" if lead_data['transformation_matrix'].shape[0] == 3: lead_data['transformation_matrix'] = np.eye(4)*lead_data['transformation_matrix'][0,0] lead_data['transformation_matrix'][-1,-1] = 1 transformation_matrix = lead_data['transformation_matrix'] transformation_matrix = np.eye(4) bounding_box_coords = [] for k in range(2): bounding_box_coords.append(np.arange(start=marker_coordinates[k]-window_size, stop=marker_coordinates[k]+window_size, step=resolution)) bounding_box_coords.append(np.repeat(marker_coordinates[-1], len(bounding_box_coords[1]))) bounding_box = np.array(bounding_box_coords) meshX, meshY = np.meshgrid(bounding_box[0,:], bounding_box[1,:]) meshZ = np.repeat(bounding_box[-1,0], len(meshX.flatten())) fitvolume_orig = np.array([meshX.flatten(), meshY.flatten(), meshZ.flatten(), np.ones(meshX.flatten().shape)]) fitvolume = np.linalg.solve(transformation_matrix, fitvolume_orig) resampled_points = PlotRoutines.interpolate_CTintensities(lead_data, fitvolume) imat = np.reshape(resampled_points, (meshX.shape[0], -1), order='F') return imat, bounding_box, fitvolume @staticmethod def resample_CTplanes(hd_trajectories, direction, lead_data, resolution=.2, sample_width=10, use_transformation_matrix=False): """Function resampling intesities of the source imaging to a grid which is later used to visualise the leads. 
[ea_mancor_updatescene lines 264f]""" direction = ''.join(direction) if type(direction) == list else direction # in case direction is entered as list if use_transformation_matrix: # not necessary as all data in cDBS stay within the LPS coordinate system if lead_data['transformation_matrix'].shape[0] == 3: lead_data['transformation_matrix'] = np.eye(4)*lead_data['transformation_matrix'][0,0] lead_data['transformation_matrix'][-1,-1] = 1 transformation_matrix = lead_data['transformation_matrix'] else: transformation_matrix = np.eye(4) xvec = np.arange(start=-sample_width, stop=sample_width+resolution, step=resolution) meanfitline = np.vstack((hd_trajectories.T, np.ones(shape=(1, hd_trajectories.T.shape[1])))) # needed for transformation addvolume = np.tile(xvec,(len(meanfitline.T),1)) fitvolume = [] for t in range(4): fitvolume.append(np.tile(meanfitline[t,:], xvec.shape).reshape(xvec.shape[0], meanfitline.shape[1]).T) fitvolume_orig = np.stack(fitvolume) if direction == 'cor': fitvolume_orig[0,:,:] += addvolume elif direction == 'sag': fitvolume_orig[1, :, :] += addvolume elif direction == 'tra': fitvolume_orig[2, :, :] += addvolume fitvolume = np.linalg.solve(transformation_matrix, np.reshape(fitvolume_orig, (4, -1), order='F')) resampled_points = PlotRoutines.interpolate_CTintensities(lead_data, fitvolume) imat = np.reshape(resampled_points, (meanfitline.shape[1], -1), order='F') return imat, fitvolume_orig # ======================================== Interpolations ======================================== @staticmethod def interpolate_trajectory(orig_trajectory, resolution=20): """interpolates between trajectory points thus creating a „high resolution“ version of it""" hd_trajectory = [] for idx in range(np.array(orig_trajectory).shape[1]): f = scipy.interpolate.interp1d(np.linspace(start=1, stop=50), np.array(orig_trajectory)[:, idx]) hd_trajectory.append(f(np.linspace(start=1, stop=50, num=(-1 + len(orig_trajectory[:, idx])) * resolution + 1))) return np.stack(hd_trajectory).T def interpolate_CTintensities(lead_model, fitvolume): import SimpleITK as sitk img = sitk.ReadImage(os.path.join(*lead_model['filenameCTimaging'])) physical_points = list(map(tuple, fitvolume[0:3,:].T)) #physical_points = physical_points[0:5] num_samples = len(physical_points) physical_points = [img.TransformContinuousIndexToPhysicalPoint(pnt) for pnt in physical_points] #interp_grid_img = sitk.Image((len(physical_points) *([1] * (img.GetDimension() - 1))), sitk.sitkUInt8) interp_grid_img = sitk.Image([num_samples] + [1] * (img.GetDimension() - 1), sitk.sitkUInt8) displacement_img = sitk.Image([num_samples] + [1] * (img.GetDimension() - 1), sitk.sitkVectorFloat64, img.GetDimension()) for i, pnt in enumerate(physical_points): displacement_img[[i] + [0] * (img.GetDimension() - 1)] = np.array(pnt) - np.array( interp_grid_img.TransformIndexToPhysicalPoint([i] + [0] * (img.GetDimension() - 1))) interpolator_enum = sitk.sitkLinear default_output_pixel_value = 0.0 output_pixel_type = sitk.sitkFloat32 if img.GetNumberOfComponentsPerPixel() == 1 else sitk.sitkVectorFloat32 resampled_temp = sitk.Resample(img, interp_grid_img, sitk.DisplacementFieldTransform(displacement_img), interpolator_enum, default_output_pixel_value, output_pixel_type) resampled_points = [resampled_temp[x,0,0] for x in range(resampled_temp.GetWidth())] debug = False if debug: for i in range(resampled_temp.GetWidth()): print(str(img.TransformPhysicalPointToContinuousIndex(physical_points[i])) + ': ' + str(resampled_temp[[i] + [0] * (img.GetDimension() - 
1)]) + '\n') return np.array(resampled_points) # ==================== General Helper Functions for manual correction ==================== @staticmethod def load_leadModel(inputdir, filename): """Function loading results from [preprocLeadCT.py] which emulates the PaCER toolbox""" if not inputdir: Output.msg_box(text="No input folder provided, please double-check!", title="Missing input folder") return elif not os.path.isfile(filename): Output.msg_box(text="Models for electrode unavailable, please run detection first!", title="Models not available") return else: with open(filename, "rb") as model: # roughly ea_loadreconstruction in the LeadDBS script lead_models = pickle.load(model) intensityProfiles = pickle.load(model) skelSkalms = pickle.load(model) return lead_models, intensityProfiles, skelSkalms @staticmethod def save_leadModel(lead_models, intensityProfiles, skelSkalms, filename=''): if not filename: Output.msg_box(text='No filename for saving lead model provided', title='No filename provided') return with open(filename, "wb") as f: pickle.dump(lead_models, f) pickle.dump(intensityProfiles, f) pickle.dump(skelSkalms, f) # ==================== Helper Functions fpr plotting Data ==================== @staticmethod def getbgsidecolor(side, xray=False): """ """ from matplotlib import colors line_cols = matplotlib.cm.get_cmap('Set1', 64) # TODO: maybe a cmap would make sense line_cols = colors.rgb_to_hsv(line_cols(np.linspace(0, 1, 64))[:,0:3]) line_cols[:,-1] = line_cols[:,-1]/3 if xray: line_cols[:, 1] = line_cols[:, 1] / 1.5 line_cols[:, 2] = line_cols[:, 2] * 1.5 line_cols = colors.hsv_to_rgb(line_cols) col = line_cols[side,:] # TODO: why on earth is this so complicated to get these colors return col class GetData: def __init__(self, parent=PlotRoutines): self.parent = parent @staticmethod def leadInformation_update(information2update, lead_data): """replaces values of lead_models with updated values; information2update can be marker, rotation, etc.""" for key_name, val in information2update.items(): lead_data[key_name] = val return lead_data # =================================== Functions related with coordinates =================================== def resize_coordinates(self, lead_coords, lead_data): """function which enables resizing cooridnates (e.g. 8 contacts to 4 contacts if needed; additional functionality contains """ if lead_data['model'] == 'Boston Vercise Directional' or 'St Jude 6172' or 'St Jude 6173': coordinates = np.zeros((4, 3)) coordinates[0, :] = lead_coords[0, :] coordinates[1, :] = np.mean(lead_coords[1: 4, :], axis=0) coordinates[2, :] = np.mean(lead_coords[4: 7, :], axis=0) coordinates[3, :] = lead_coords[7, :] emp_dist = GetData.lead_dist(coords=coordinates) else: coordinates = lead_coords emp_dist = GetData.lead_dist(coords=coordinates, factor=lead_data['numel']) return coordinates, emp_dist @staticmethod def lead_dist(coords, factor=3): """calculate lead distances according to its coordinates""" spatial_distance = scipy.spatial.distance.cdist(coords, coords, 'euclidean') emp_dist = np.sum(np.sum(np.tril(np.triu(spatial_distance, 1), 1))) / factor return emp_dist def resolve_coordinates(self, marker, lead_coords_mm, lead_positions, lead_data, resize_bool=False, rszfactor=0): """emulates the function from Lead-DBS ea_resolvecoords; unlike in Lead DBS this is done one at a time cf. 
https://github.com/netstim/leaddbs/blob/master/templates/electrode_models/ea_resolvecoords.m""" if resize_bool: can_dist = np.linalg.norm(lead_positions["head_position"] - lead_positions["tail_position"]) coords_temp, can_eldist = GetData.resize_coordinates(self, lead_coords_mm, lead_data) stretch = can_dist * (rszfactor / can_eldist) if rszfactor != 0 else can_dist vec = np.divide((marker["markers_tail"] - marker["markers_head"]), np.linalg.norm(marker["markers_tail"] - marker["markers_head"])) marker["markers_tail"] = marker["markers_head"] + vec * stretch coords, traj_vector, trajectory, can_eldist = [[] for _ in range(4)] if not marker["markers_head"].size==0: M = np.stack((np.append(marker["markers_head"], 1), np.append(marker["markers_tail"], 1), np.append(marker["markers_x"], 1), np.append(marker["markers_y"], 1))) E = np.stack((np.append(lead_positions["head_position"], 1), np.append(lead_positions["tail_position"], 1), np.append(lead_positions["x_position"], 1), np.append(lead_positions["y_position"], 1))) X = np.linalg.lstsq(E, M, rcond=None) coords_mm = np.concatenate([lead_coords_mm, np.ones(shape=(lead_coords_mm.shape[0],1))], axis=1) coords = (coords_mm @ X[0]).T coords = coords[0: 3,:].T traj_vector = (marker["markers_tail"] - marker["markers_head"]) / \ np.linalg.norm(marker["markers_tail"] - marker["markers_head"]) trajectory = np.stack((marker["markers_head"] - traj_vector*5, marker["markers_head"] + traj_vector*25)) trajectory = np.array((np.linspace(trajectory[0, 0], trajectory[1, 0], num=50), np.linspace(trajectory[0, 1], trajectory[1, 1], num=50), np.linspace(trajectory[0, 2], trajectory[1, 2], num=50))).T return coords, trajectory, can_eldist, marker # =================================== Functions in combination with leads =================================== @staticmethod def get_default_lead(lead_data): """obtains default lead properties according to the model proposed in the PaCER algorithm @ ./template""" if lead_data['model'] == 'Boston Vercise Directional': # load mat-file to proceed mat_filename = 'boston_vercise_directed.mat' lead_model = loadmat(os.path.join(ROOTDIR, 'ext', 'LeadDBS', mat_filename), 'r')['electrode'] default_positions = {x: np.hstack(vals) for x, vals in lead_model.items() if x.endswith('position')} default_coordinates = np.array(lead_model['coords_mm']) # in [mm] else: Output.msg_box(text="Lead type not yet implemented.", title="Lead type not implemented") return return lead_model, default_positions, default_coordinates def get_leadModel(self, lead_data, default_positions, default_coordinates, side, resize=False): """ reads and estimates all necessary markers and trajectories for the corresponding lead model """ print("\t... 
reading lead data properties for {} side and estimating rotation".format(side)) marker_unprocessed = dict([(k, r) for k, r in lead_data.items() if k.startswith('marker')]) if not (lead_data['first_run'] and lead_data['manual_correction']): resize = True # TODO: Not sure if this is doing the job; some warning/information should be displayed that > first run lead_data['first_run'] = False _, lead_data['trajectory'], _, marker_temp = \ GetData.resolve_coordinates(self, marker_unprocessed, default_coordinates, default_positions, lead_data, resize_bool=resize) lead_data['rotation'] = GetData.initialise_rotation(lead_data, marker_temp) xvec, yvec, lead_data['rotation'], marker_rotation = GetData.estimate_rotation(lead_data, marker_temp) lead_data = GetData.leadInformation_update(marker_rotation, lead_data) if xvec.size == 0 or yvec.size == 0: xvec, yvec, lead_data['rotation'], marker_rotation = GetData.estimate_rotation(lead_data, marker_rotation) lead_data = GetData.leadInformation_update(marker_rotation, lead_data) options2process = {'xvec': [1, 0, 0], 'yvec': [0, 1, 0]} unrot = {k: [] for k in options2process.keys()} for key in options2process: vec_temp = np.cross(lead_data["normtraj_vector"], options2process[key]) unrot[key] = np.divide(vec_temp, np.linalg.norm(vec_temp)) marker = dict([(k, r) for k, r in lead_data.items() if k.startswith('marker')]) coordinates, trajectory, _, _ = GetData.resolve_coordinates(self, marker, default_coordinates, default_positions, lead_data, resize_bool=False) # ea_mancor_updatescene line 144 return marker, coordinates, trajectory, resize def worker_xy_planes(self, traj, lead, dimension, side, IM_dict, BB_dict, FV_dict, queue): print("\t...extracting intensities for CTimages @ {} side\t...".format(side)) queue.put('.') intensity_matrix, bounding_box, fitvolume = GetData.get_xyplanes(self, trajectory=traj, lead_model=lead, direction=dimension) IM_dict[side] = intensity_matrix
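worker_xy_planes() above is clearly meant to be dispatched once per hemisphere, writing into shared dictionaries and signalling progress through a queue. Below is a minimal sketch of that dispatch pattern with multiprocessing.Manager; the worker body is a placeholder standing in for the real call to get_xyplanes(), and the calling convention is trimmed to the shared containers.

import multiprocessing as mp

# Stand-in worker mirroring worker_xy_planes(): it fills per-side result
# dicts and signals progress through the queue.
def fake_worker(side, IM_dict, BB_dict, FV_dict, queue):
    queue.put('.')
    IM_dict[side] = 'intensity_matrix_placeholder'
    BB_dict[side] = 'bounding_box_placeholder'
    FV_dict[side] = 'fitvolume_placeholder'

if __name__ == '__main__':
    manager = mp.Manager()
    IM_dict, BB_dict, FV_dict = manager.dict(), manager.dict(), manager.dict()
    queue = manager.Queue()

    jobs = [mp.Process(target=fake_worker, args=(side, IM_dict, BB_dict, FV_dict, queue))
            for side in ('left', 'right')]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()

    print(dict(IM_dict))  # results keyed by side, as in the class above

Manager proxies are used so both processes can fill IM_dict, BB_dict and FV_dict without explicit locking in the caller.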
## Copyright (c) 2001-2009, <NAME> ## August 2009 #----------------------------------------------------------------------- # Notes: This file defines a "base class" for CSDMS "process" # components. Many of these functions are implementations # of methods defined in "topoflow3.IRFPort.sidl". They are # therefore required to use TF components in a CCA framework. #----------------------------------------------------------------------- # # unit_test() # # class CSDMS_component # __init__() # get_status() # is_scalar() # is_vector() # is_grid() # has_variable() # ----------------------------- # Next 3 currently identical # ----------------------------- # get_scalar_double() # get_vector_double() ## (2/16/10 # get_grid_double() # get_values_in_grid_double() ## (2/17/10) # ----------------------------- # Next 3 currently identical # ----------------------------- # set_scalar_double() # set_vector_double() ## (2/16/10) # set_grid_double() # set_values_in_grid_double() ## (2/17/10) # --------------------- # get_scalar_long() # get_vector_long() # get_grid_long() # get_values_in_grid_long() ## (2/17/10) # --------------------- # set_scalar_long() ## (2/16/10) # set_vector_long() ## (2/16/10) # set_grid_long() ## (2/16/10) # set_values_in_grid_long() ## (2/17/10) # --------------------- # get_input_items() # get_output_items() # --------------------- # read_gui_info_file() ### 11/13/09 # get_user_input() ### 9/24/09, 11/13/09 # load_user_input() ### 10/1/09 # --------------------- # go() # run_model() # ### read_config_file() # (template) # ### initialize # (template) # ### update() # (template) # finalize() # ----------------------------------------------- # These methods are not part of "IRF" interface # but are used by the TopoFlow components. # ----------------------------------------------- # initialize_required_components() # get_cca_port() # get_cca_ports() # release_cca_ports() # ------------------------ # add_child_port() # get_port_data() # (rename to get_port_double ??) # set_port_data() # (rename to set_port_double ??) # ------------------------ # get_rank() # get_size() # ------------------------ # set_directory() # read_grid_info() # store_outlet_IDs() # initialize_time_vars() # update_time() # print_time_and_value() # print_run_time() #----------------------------------------------------------------------- from numpy import * import numpy import sys, os, time import cfg_files as cfg import pixels import rti_files import tf_utils #----------------------------------------------------------------------- def unit_test(): directory = tf_utils.TF_Test_Directory() data_prefix = tf_utils.TF_Test_Data_Prefix() case_prefix = tf_utils.TF_Test_Case_Prefix() c = CSDMS_component() c.CCA = False print 'Instantiated component.' 
#-------------------------------------------- # Check the "read_gui_info_file()" function #-------------------------------------------- ## gui_info_file = '/Applications/Erode/gui_info/Erode_GUI_Info.txt' ## gui_info_file = '/data/progs/erode/3.0/gui_info/Erode_GUI_Info.txt' gui_info_file = '/Users/peckhams/Desktop/GC2D_GUI_Info.txt' var_names, labels, values, min_vals, max_vals, \ desc, group_names, group_sizes = \ c.read_gui_info_file( gui_info_file ) print 'var_names =' print var_names print 'labels =' print labels print 'values =' print values print 'min_vals =' print min_vals print 'max_vals =' print max_vals print 'desc =' print desc print 'group_names =' print group_names print 'group_sizes =' print group_sizes return #------------------------------------- # Test the "print_run_time()" method #------------------------------------- ## print ' ' ## print 'Testing "print_run_time()"...' ## c.print_run_time(seconds=1.618) ## c.print_run_time(seconds=60) ## c.print_run_time(seconds=3600) ## c.print_run_time(seconds=3600 * 24) ## c.print_run_time(seconds=3600 * 24 * 365) #--------------------------- # Test some of the methods #--------------------------- c.a = numpy.float64(5) print 'c.a = numpy.float64(5)' print "c.is_scalar('a') =", c.is_scalar('a') print "c.is_grid('a') =", c.is_grid('a') v1 = c.get_port_data('a', c) print "c.get_port_data('a',c) =", v1 print ' ' #------------------------------------------------- c.b = numpy.zeros((3,3), dtype='Float64') print "c.b = numpy.zeros((3,3), dtype='Float64')" print "c.is_scalar('b') =", c.is_scalar('b') print "c.is_grid('b') =", c.is_grid('b') v2 = c.get_port_data('b', c) print "c.get_port_data('b',c) =", v2 print ' ' #------------------------------------------------- print "c.is_scalar('b[1]') =", c.is_scalar('b[1]') print "c.is_grid('b[1]') =", c.is_grid('b[1]') v3 = c.get_port_data('b[1]', c) print "c.get_port_data('b[1]',c) =", v3 print ' ' #------------------------------------------------- # This component has no initialize() method ## c.initialize(directory=directory, ## data_prefix=data_prefix, ## case_prefix=case_prefix) ## print 'nx =', c.nx ## print 'ny =', c.ny # unit_test() #----------------------------------------------------------------------- class CSDMS_component: def __init__(self): self.CCA = tf_utils.TF_Use_CCA() self.DEBUG = True self.SILENT = True self.REPORT = False self.status = 'created' # (OpenMI 2.0 conventions) self.USER_SET_VALUES = False # __init__() #------------------------------------------------------------------- def get_status(self): #----------------------------------------------------- # Notes: Return component status as a string. The # possible return values are from OpenMI 2.0: # # created, initializing, initialized, # updating, updated, finalizing, finalized, # failed (could add "stopped"). #----------------------------------------------------- return self.status # get_status() #------------------------------------------------------------------- def is_scalar(self, var_name): #------------------------------------------------ # NB! Case in var_name must be an exact match. #------------------------------------------------- exec("n = numpy.rank(self." + var_name + ")") return (n == 0) # is_scalar() #------------------------------------------------------------------- def is_vector(self, var_name): #------------------------------------------------ # NB! Case in var_name must be an exact match. #------------------------------------------------ exec("n = numpy.rank(self." 
+ var_name + ")") return (n == 1) # is_vector() #------------------------------------------------------------------- def is_grid(self, var_name): #------------------------------------------------ # NB! Case in var_name must be an exact match. #------------------------------------------------ #------------------------------------------------- # (9/29/09) This might be causing a problem with # the c++ bindings for this CCA component. #------------------------------------------------- ## exec("type_str = str(type(self." + var_name + "))") ## p1 = type_str.find("ndarray") ## p2 = type_str.find("float") ## if (p1 == -1) and (p2 == -1): ## print 'ERROR: type(' + var_name + ') =' + type_str ## return False #------------------------------------------------- # (9/29/09) This might be causing a problem with # the c++ bindings for this CCA component. #------------------------------------------------- ## if ("ndarray" not in type_str) and \ ## ("float" not in type_str): ## print 'ERROR: type(' + var_name + ') =' + type_str ## return False #------------------------------------------------------- exec("n = numpy.rank(self." + var_name + ")") return (n == 2) # is_grid() #------------------------------------------------------------------- def has_variable(self, var_name): #------------------------------------------------------ # If var_name includes square brackets for subscripts # remove them to get the actual variable name. #------------------------------------------------------ bracket_pos = var_name.find('[') if (bracket_pos != -1): key = var_name[0:bracket_pos] else: key = var_name #--------------------------------------------------- # Does current component have requested variable ? #--------------------------------------------------- SILENT = True VARIABLE_FOUND = self.__dict__.has_key(key) if not(VARIABLE_FOUND) and not(SILENT): print 'WARNING: Component does not have the' print ' requested variable: ' + var_name print ' ' return VARIABLE_FOUND # has_variable() #------------------------------------------------------------------- def get_scalar_double(self, var_name): #------------------------------------ # Note: The next line doesn't work. #------------------------------------ ## exec("return self." + var_name) #--------------------------------------------------- # Does current component have requested variable ? #--------------------------------------------------- # This is not used yet because possible impact on # performance has not be tested yet. (2/17/10) # If it does get used later, it will be added to # all of the "getters". #--------------------------------------------------- ## if not(self.has_variable(var_name)): ## return float64(0) try: exec("result = self." + var_name) return numpy.float64(result) except: print 'ERROR in CSDMS_base.get_scalar_double().' print ' Returning 0.' return numpy.float64(0) ############## flush output here ?? # get_scalar_double() #------------------------------------------------------------------- def get_vector_double(self, var_name): #--------------------------------------------------------- # Note: This function was causing a "segmentation fault # in gui-backend.sh" error message when trying to # run TopoFlow through the CMT (in CCA framework). # Solution was to use numpy.array, as shown. # (2/17/10) #--------------------------------------------------------- try: exec("result = self." + var_name) return numpy.array(result, dtype='float64') #------------------------- # NB! This doesn't work. 
#------------------------- # return numpy.float64(result) except: print 'ERROR in CSDMS_base.get_vector_double().' print ' Returning zeros.' return numpy.zeros([1], dtype='float64') # get_vector_double() #------------------------------------------------------------------- def get_grid_double(self, var_name): try: exec("result = self." + var_name) return numpy.float64(result) except: print 'ERROR in CSDMS_base.get_grid_double().' print ' Returning zeros.' return numpy.zeros([1,1], dtype='float64') # get_grid_double() #------------------------------------------------------------------- def get_values_in_grid_double(self, var_name, IDs): #--------------------------------------------------------- # Note: This function was causing a "segmentation fault # in gui-backend.sh" error message when trying to # run TopoFlow through the CMT (in CCA framework). # Solution was to use numpy.array, as shown. # (2/18/10) #--------------------------------------------------------- # Notes: This function was tested in the new Diversions # component on (2/18/10). #--------------------------------------------------------- try: exec("result = self." + var_name + '.flat[IDs]') return numpy.array(result, dtype='float64') ## return numpy.float64(result) except: print 'ERROR in CSDMS_base.get_values_in_grid_double().' print ' Returning zeros.' return numpy.zeros(len(IDs), dtype='float64') # get_values_in_grid_double() #------------------------------------------------------------------- #------------------------------------------------------------------- def set_scalar_double(self, var_name, scalar): exec("self." + var_name + " = numpy.float64(scalar)") # set_scalar_double() #------------------------------------------------------------------- def set_vector_double(self, var_name, vector): #-------------------------------------------------- # Notes: First method here should be more robust. # See Notes for get_vector_double(). #-------------------------------------------------- exec("self." + var_name + " = numpy.array(vector, dtype='float64')") #----------------------------------- # Original method (before 2/17/10) #----------------------------------- # exec("self." + var_name + " = numpy.float64(vector)") # set_vector_double() #------------------------------------------------------------------- def set_grid_double(self, var_name, grid): exec("self." + var_name + " = numpy.float64(grid)") # set_grid_double() #------------------------------------------------------------------- def set_values_in_grid_double(self, var_name, IDs, values): #-------------------------------------------------------- # Notes: This function was tested in the new Diversions # component on (2/18/10). #-------------------------------------------------------- exec("self." + var_name + ".flat[IDs] = values") # exec("self." + var_name + ".flat[IDs] = numpy.float64(values)") # set_values_in_grid_double() #------------------------------------------------------------------- #------------------------------------------------------------------- def get_scalar_long(self, var_name): exec("result = numpy.int32(self." + var_name + ")") return result # get_scalar_long() #------------------------------------------------------------------- def get_vector_long(self, var_name): #-------------------------------------------- # Notes: See Notes for get_vector_double(). #-------------------------------------------- try: exec("result = self." + var_name) return numpy.array(result, dtype='int32') #------------------------- # NB! This doesn't work. 
#------------------------- # return numpy.int32(result) except: print 'ERROR in CSDMS_base.get_vector_long().' print ' Returning zeros.' return numpy.zeros([1], dtype='int32') # get_vector_long() #------------------------------------------------------------------- def get_grid_long(self, var_name): exec("result = numpy.int32(self." + var_name + ")") return result # get_grid_long() #------------------------------------------------------------------- def get_values_in_grid_long(self, var_name, IDs): try: exec("result = self." + var_name + '.flat[IDs]') return numpy.int32(result) except: print 'ERROR in CSDMS_base.get_values_in_grid_long().' print ' Returning zeros.' return numpy.zeros(len(IDs), dtype='int32') # get_values_in_grid_long() #------------------------------------------------------------------- #------------------------------------------------------------------- def set_scalar_long(self, var_name, scalar): exec("self." + var_name + " =
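# ------------------------------------------------------------------------------------
# Hedged illustration (not part of CSDMS_base): for plain attribute names, the
# exec-string accessors above can be expressed with getattr/setattr. This is a minimal
# sketch in its own small class (SimpleComponent, get_scalar, set_scalar are
# hypothetical names); subscripted names such as "b[1]" would still need the
# string-evaluation route used by the original getters.
import numpy

class SimpleComponent(object):
    def get_scalar(self, var_name):
        # getattr-based equivalent of get_scalar_double() for a plain attribute name
        try:
            return numpy.float64(getattr(self, var_name))
        except AttributeError:
            print('ERROR: no attribute named %s, returning 0.' % var_name)
            return numpy.float64(0)

    def set_scalar(self, var_name, scalar):
        # setattr-based equivalent of set_scalar_double()
        setattr(self, var_name, numpy.float64(scalar))

# c = SimpleComponent(); c.set_scalar('a', 5); c.get_scalar('a')   # -> 5.0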
linewidth=linewidth, c=color) ax.errorbar(sigmas[1:-1], averages[1:-1], yerr=stds[1:-1],fmt='o', capthick=linewidth, elinewidth=linewidth, capsize=linewidth*6,markersize=10, c=color) sns.despine(offset=10)#, trim=True); ax.set_aspect(0.5) ax.set_yscale("log")#, nonposy='clip') ax.set_xscale("log")#, nonposy='clip') ax.set_xlabel('$\sigma$',fontsize=textsize*2) ax.set_ylabel(r'$|\mathcal{R}_1(\sigma)-\mathcal{R}_2|$',fontsize=textsize*1.5) plt.setp(ax.get_xticklabels(), size=textsize) plt.setp(ax.get_yticklabels(), fontsize=textsize)#, rotation='vertical') # Dropping alternate tick labels because the text size is too large (but necessarily large) #plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) #plt.setp(ax.get_xticklabels()[::2], visible=True) plt.setp(ax.get_yticklabels()[::2], visible=True) # ax.minorticks_off() ax.tick_params(axis=u'both', direction='out', width=linewidth, length=linewidth*4)#,labelsize=for) for spine in ax.spines.keys(): ax.spines[spine].set_linewidth(linewidth) #print "R - (phi+psi-2pi)/4pi shows the following distribution: %f +/- %f" %() plt.tight_layout(pad=0.0, w_pad=0.0, h_pad=0.0) #gs.update(wspace=0.0, hspace=0) #plt.subplots_adjust(wspace=.201) #plt.subplots_adjust(hspace=0.001) plt.savefig(figname, dpi=180, bbox_inches='tight',transparent=True) #facecolor='w', edgecolor='w', if show_graphs: os.system(pdf_viewer+" "+figname) # # ==================================================================================== if 0: # not used in main manuscript figname = output_fig_dir+"/fig_various_rama_plots.pdf" sns.axes_style("ticks")# # VERY IMPORTANT FOR GETTING THE SHAPE OF EACH PANEL CORRECT plt.figure(figsize=(12*1.2,6*1.2)) set_grays() textsize = plt.rcParams['font.size'] linewidth = plt.rcParams['lines.linewidth']*.5 # Needs the length_dependent_csv_file (to be read in as the pandas dataframe 'df') # WARNING: VERY COSTLY TO REDO: if not os.path.isfile(length_dependent_csv_file): prepare_main_csv_file() df = pd.read_csv(length_dependent_csv_file) df = df[(df['L']==16.0)] ''' _______________ |_______________| colorbar (0,1) |--------|--------|--------|--------| trans | rg | R | theta | d | | (0,0) | (0,1) | (0,2) | (0,3) | |--------|--------|--------|--------| cis | rg | R | theta | d | | (1,0) | (1,1) | (1,2) | (1,3) | |--------|--------|--------|--------| ''' gs = mpl.gridspec.GridSpec(3, 4, width_ratios =[1,1,1,1], height_ratios=[1,20,20]) gs.update(wspace=0.4, hspace=0.5) ax_cmap = plt.subplot(gs[0,1:3]) ax_trans_0 = plt.subplot(gs[1,0]) ax_trans_1 = plt.subplot(gs[1,1]) ax_trans_2 = plt.subplot(gs[1,2]) ax_trans_3 = plt.subplot(gs[1,3]) ax_cis_0 = plt.subplot(gs[2,0]) ax_cis_1 = plt.subplot(gs[2,1]) ax_cis_2 = plt.subplot(gs[2,2]) ax_cis_3 = plt.subplot(gs[2,3]) panels_a_through_c = ['rg','theta','d','R'] axes = {'cmap' : ax_cmap, 'trans':{ panels_a_through_c[0]: ax_trans_0, panels_a_through_c[1]: ax_trans_1, panels_a_through_c[2]: ax_trans_2, panels_a_through_c[3]: ax_trans_3 }, 'cis': { panels_a_through_c[0]: ax_cis_0, panels_a_through_c[1]: ax_cis_1, panels_a_through_c[2]: ax_cis_2, panels_a_through_c[3]: ax_cis_3 } } cmap = sns.cubehelix_palette(100, start=.5, rot=-.75, dark=0.2, light=1, as_cmap=True) #cmap = sns.diverging_palette(240, 10, n=2, as_cmap=True) panel_letters = itertools.cycle(list(string.ascii_lowercase)) amin = -180.0; amax=180.0; for omega in [180.0,0.0]: cis_or_trans = "trans" if omega == 0.0: cis_or_trans = "cis" xtype = "phi"; ytype = "psi"; for ztype in panels_a_through_c: 
#axes['trans'].keys(): ax = axes[cis_or_trans][ztype] tdf = df[(df['omega'] == omega)] # & (df['psi']>0)] x = tdf[xtype] y = tdf[ytype] levels = 10 trunkated_levels = levels/2 real_z_type = ztype if real_z_type == 'd': z = do_d(tdf[real_z_type]) elif real_z_type == 'd2': real_z_type = 'd' z = np.abs(tdf[real_z_type]) elif real_z_type == 'theta': z = do_theta(tdf[real_z_type]) else: z = tdf[real_z_type] if real_z_type == 're' or real_z_type == 'rg' or ztype == 'd2': levels = 7 if ztype == 'd2': levels = 5 if np.min(z) < 0: trunkated_levels += 1 ''' # This proves that 'R(psi,psi)' ~ '(phi+psi+2pi)/4pi' if ztype == 'R': min_v = -360.0 max_v = 360.0 z = (tdf['phi'] + tdf['psi'] - min_v)/(max_v-min_v) ''' X,Y,Z = locallib.xyz_to_ndarrays(x,y,z) if 0: # smooth functions if desired (it messes with contour lines sometimes at plot edges, though) Z = scipy.ndimage.filters.gaussian_filter(Z, 2)#, order=0) if 0: ax.imshow(Z, cmap='hot', interpolation='nearest') else: # DRAWING THE FILLED CONTOUR PLOT! ax.contourf(X, Y, Z, levels, cmap = cmap) # DRAWING LINES (negative contours will be dashed by default) CS = ax.contour(X, Y, Z, levels, colors=[plt.rcParams['lines.color']],linewidth=linewidth) # =============================================================================== # ALL THIS STUFF IS FOR LABELING CONOTOURS (drop alternating labels, etc) # Recast level labels fmt = {} for l in CS.levels: s = "%1.1f" %(round(l,1)) if float(l) == float(int(l)): s = "%d" %(l) fmt[l] = s # Dropping every <skip_number> terms (ignored if skip_number == 0) counter = 0 skip_number = 2 if skip_number: for collection_i in range(len(CS.collections)): counter+=1 if counter % skip_number == 0: plt.setp(CS.collections[collection_i], linewidth=0) fmt[CS.levels[collection_i]] = '' xmin,xmax = ax.get_xlim(); ymin,ymax = ax.get_ylim(); # getting axes min max values label_pos = [] midpoint = np.array([float(xmax+xmin)/2.0, float(ymax+ymin)/2.0]) for line in CS.collections: for path in line.get_paths(): # find closest point X = [] Y = [] label_x = 0.0 label_y = 0.0 distance_from_center = [] for point in path.vertices: distance_from_center.append(np.linalg.norm(point-midpoint)) label_position_index = distance_from_center.index(np.min(distance_from_center)) if 0:# ztype == 'd': plt.scatter(path.vertices[:,0],path.vertices[:,1]) plt.scatter(*path.vertices[label_position_index],s=100,c='r') plt.ylim((-190,190)) plt.xlim((-190,190)) plt.show() label_pos.append(path.vertices[label_position_index]) CLS = ax.clabel(CS, fontsize=textsize*.65, inline=1, fmt=fmt,manual=label_pos, colors='k') if 1: # -------------------------------------------------------------------------- # SOMETIMES, LABELS ENCROACH ON THE AXES AND GET CLIPPED. 
HERE WE FIND THOSE # THAT EXCEED A THRESHOLD DISTANCE AND DELETE THEM # Swiped from: http://stackoverflow.com/questions/25873681/matplotlib-contour-plot-labels-overlap-axes thresh = 0.02 # ratio in x/y range in border to discard xmin,xmax = ax.get_xlim(); ymin,ymax = ax.get_ylim() # getting axes min max values Dx = xmax-xmin Dy = ymax-ymin # check which labels are near a border keep_labels = [] labels_to_delete = [] for label in CLS: lx,ly = label.get_position() if xmin+thresh*Dx<lx<xmax-thresh*Dx and ymin+thresh*Dy<ly<ymax-thresh*Dy: # inlier, redraw it later keep_labels.append((lx,ly)) else: labels_to_delete.append(label) # delete the original lines, redraw manually the labels we want to keep # this will leave unlabelled full contour lines instead of overlapping labels #for cline in CS.collections: # cline.remove() for label in labels_to_delete: label.remove() # LABEL STUFF DONE, DONE. D.O.N.E. # =============================================================================== #'-' solid line style; '--' dashed line style; '-.' dash-dot line style; ':' dotted line style #ax.plot([-180,180],[180,-180], 'k--') # -ve diagonal #ax.plot([-180,180],[0,0], 'k:') # Horizontal line passing (0,0) #ax.plot([0,0],[-180,180], 'k:') # Vertical line passing (0,0) xticks = range(int(amin),int(amax)+1,180) yticks = range(int(amin),int(amax)+1,180) # Setting ticks ax.tick_params(axis=u'both', direction='out', width=linewidth, length=linewidth*5, pad=0,labelsize=textsize) ax.set_xticks(xticks) plt.setp(ax.get_xticklabels(), fontsize=textsize*.7) #rotation='vertical', plt.setp(ax.get_yticklabels(), fontsize=textsize*.7) #rotation='vertical', ax.set_xlabel(type_to_label[xtype], size=textsize) # Drawing an equal-aspect-ratio graph #ax.set_aspect(1) # Setting title if 1:#cis_or_trans == 'trans': addto_title = r'('+next(panel_letters)+r')' t = ax.set_title(addto_title, size=textsize*1.05,loc='left') ax.set_title(type_to_label[ztype], size=textsize*1.15,loc='center') t.set_y(1.03) # Shifting the title up a little ''' |--------|--------|--------|--------| trans | re | R | theta | |d| | | (0,0) | (0,1) | (0,2) | (0,3) | |--------|--------|--------|--------| cis | re | R | theta | |d| | | (1,0) | (1,1) | (1,2) | (1,3) | |--------|--------|--------|--------| ''' ax.set_yticks(yticks) if ztype != panels_a_through_c[0]: # erase the y labels ax.set_yticklabels([]) ax.set_ylabel("", size=textsize) elif cis_or_trans == 'cis': ax.set_yticks(yticks) ax.set_ylabel(r"cis"+"\n\n"+type_to_label[ytype], size=textsize,weight='normal',style='italic') elif cis_or_trans == 'trans': ax.set_ylabel(r"trans"+"\n\n"+type_to_label[ytype], size=textsize,weight='normal',style='italic') if cis_or_trans != 'cis': # erase the x labels #ax.set_xticklabels([]) ax.set_xlabel("", size=textsize) plt.sca(ax);plt.xticks(rotation=45) plt.sca(ax);plt.yticks(rotation=45) for spine in ax.spines.keys(): ax.spines[spine].set_linewidth(linewidth*2.0) if 1: # addding the colorbar ''' _______________ |_______________| colorbar (0,1) |--------|--------|--------|--------| trans | rg | re | R | |d| | | (1,0) | (1,1) | (1,2) | (1,3) | |--------|--------|--------|--------| cis | rg | re | R | |d| | | (2,0) | (2,1) | (2,2) | (2,3) | |--------|--------|--------|--------| ''' ax = axes['cmap'] #plt.subplot2grid((3,4), (0,1), colspan=2) # Making a fake 'figure' for a colorbar (easier than controling an orphan colorbar) x = [] ; y = [] ; z = [] ; for i in range(0,101,5): # to be x and y for j in [0,5,10]: # to be i and j x.append(i) y.append(j) z.append(i) x = 
np.array(x); y = np.array(y); z = np.array(z); X,Y,Z = locallib.xyz_to_ndarrays(x,y,z) # Plot the colorbar ax.contourf(X,Y,Z, 20,cmap = cmap) # Just setting the various aspects of the ticks, labels, etc. ax.tick_params(axis=u'both', direction='in', width=linewidth, length=0, pad=2,labelsize=textsize*0.8, top=True, labeltop=True, bottom=False, labelbottom=False) ax.set_xticks([4,50,96]) ax.set_xticklabels(['low','medium','high']) ax.set_yticks([]) # Resetting the frame size for spine in ax.spines.keys(): ax.spines[spine].set_linewidth(linewidth*2.0) # # #plt.tight_layout()#pad=0.0, w_pad=0.0, h_pad=0.2) #plt.subplots_adjust(wspace=.201) #plt.subplots_adjust(hspace=0.01) plt.savefig(figname, dpi=180, bbox_inches='tight',transparent=True) #facecolor='w', edgecolor='w', if show_graphs: os.system(pdf_viewer+" "+figname) #plt.show() if 0: # not used in main manuscript figname = output_fig_dir+"/fig_various_relationships.pdf" #sns.reset_orig() sns.set_style("ticks") set_grays() # VERY IMPORTANT FOR GETTING THE SHAPE OF EACH PANEL CORRECT plt.figure(figsize=(12,5)) # DRAWING A BUNCH OF RELATIONSHIPS! colors = ['b','r','g','k'] # Shows all other parameters to muck about with: #print c textsize = plt.rcParams['font.size'] linewidth = plt.rcParams['lines.linewidth'] ytype = 'rg' # ======================================================================= # Here, we generate a set of relationships ''' colorbar |---|---|---|---| cis |0,0|0,1|0,2|0,3| |---|---|---|---| trans |1,0|1,1|1,2|0,3| |---|---|---|---| phi R theta d ''' #plt.plot([0, 1], [0, 2], sns.xkcd_rgb["medium green"], lw=3) #plt.plot([0, 1], [0, 3], sns.xkcd_rgb["denim blue"] c1 = sns.hls_palette(2, l=.3, s=.6)[1] # should be blue c2 = sns.hls_palette(2, l=.8, s=.9)[0] # should be red c3 = sns.hls_palette(3, l=.8, s=.9)[1] # should be green relationships_palette = [c1,c2,c3] #sns.color_palette("colorblind") # Declaring the various panels gs = mpl.gridspec.GridSpec(3, 4, width_ratios =[1,1,1,1], height_ratios=[2,10,10]) # Declaring the various axes (panels) row_no = 1 ax_0_0 = plt.subplot(gs[row_no,0])# Y ax_0_1 = plt.subplot(gs[row_no,1],sharey=ax_0_0) ax_0_2 = plt.subplot(gs[row_no,2],sharey=ax_0_0) ax_0_3 = plt.subplot(gs[row_no,3],sharey=ax_0_0) #ax_0_4 = plt.subplot(gs[row_no,4],sharey=ax_0_0) row_no = 2 # Y X ax_1_0 = plt.subplot(gs[row_no,0] ,sharex=ax_0_0) ax_1_1 = plt.subplot(gs[row_no,1],sharey=ax_1_0,sharex=ax_0_1) ax_1_2 = plt.subplot(gs[row_no,2],sharey=ax_1_0,sharex=ax_0_2) ax_1_3 = plt.subplot(gs[row_no,3],sharey=ax_1_0,sharex=ax_0_3) #ax_1_4 = plt.subplot(gs[row_no,4],sharey=ax_1_0,sharex=ax_0_4) # Placing the axes in an easy to access dictionary of dictionaries (axes['cis/trans']['xtype']) # ytype is always 'rg' or 're' (or whatever you want it to be... 
it should be a label in our Pandas dataframe) axes = {'trans':{ 'phi': ax_0_0, 'theta': ax_0_1, 'd': ax_0_2, 'R': ax_0_3 }, 'cis': { 'phi': ax_1_0, 'theta': ax_1_1, 'd': ax_1_2, 'R': ax_1_3 } } # step = 2 colors = ['b','r','g','k'] lengths = np.arange(1,3)*8 omegas = [0,180] # COSTLY: if not os.path.isfile(length_dependent_csv_file): prepare_main_csv_file() df = pd.read_csv(length_dependent_csv_file) # Take the two largest lengths only (otherwise many plots get confusing, as all relationships are length dependent) Ls = sorted(set(df['L']))[-2:] omegas = sorted(set(df['omega'])) for cis_or_trans in axes.keys(): omega = 180.0 if cis_or_trans == 'cis': omega = 0.0 for xtype in axes[cis_or_trans].keys(): # We write to this panel: ax = axes[cis_or_trans][xtype] # # resetting the color palette palette = itertools.cycle(relationships_palette) # for L in Ls: c = next(palette) # 'cdf' stands for 'current dataframe' cdf = df[(df['L'] == L) & (df['omega'] == omega)] cdf = cdf.sort(xtype)#, ascending=False) m = 's' if omega == 0: m = 'o' X = cdf[xtype] if xtype == 'd': X = do_d(X) if xtype == 'theta': X = do_theta(X) Y = list(cdf[ytype]) calcdf = pd.DataFrame({'X':X,'Y':Y}) alpha = 0.5 # GETTING ERROR BARS AND INTERVALS xsteps = float(np.max(X)-np.min(X))/200 Xbins = np.arange(np.min(X)-xsteps/2, np.max(X)+xsteps/2, xsteps) xave = [] yave = [] ystd = [] for i in range(len(Xbins)-1): # temp df ys_in_range = calcdf[(calcdf['X'] >= Xbins[i]) & (calcdf['X'] < Xbins[i+1])]['Y'] if len(ys_in_range): xave.append((Xbins[i]+Xbins[i+1])/2.) yave.append(np.mean(ys_in_range)) ystd.append(np.std(ys_in_range)) xave = np.array(xave) yave = np.array(yave) ystd = np.array(ystd) ax.plot(xave, yave, 'k', color=c, label="$L="+str(L)+r"$")#'#CC4F1B') ax.fill_between(xave, yave - ystd, yave + ystd, interpolate=True, facecolor=c, alpha=0.3) # # # ''' phi psi R theta d |---|---|---|---|---| cis |0,0|0,1|0,2|0,3|0,4| |---|---|---|---|---| trans |1,0|1,1|1,2|0,3|0,4| |---|---|---|---|---| ''' #setting limits based on what is available for cis_or_trans in axes.keys(): # either rg or re for xtype in axes[cis_or_trans].keys(): ax = axes[cis_or_trans][xtype] omega = 180.0 if cis_or_trans == 'cis': omega = 0.0 cdf = df[(df['omega']==omega)] xs = cdf[xtype] ys = cdf[ytype] if xtype == 'd': xs = do_d(xs) ax.set_xticks(range(-6,6,2)) elif xtype == 'theta': xs =
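# ------------------------------------------------------------------------------------
# Hedged illustration (not from the original plotting script): a self-contained sketch
# of the binned mean/std computation used above to draw the shaded error bands with
# ax.plot + ax.fill_between. Only numpy is assumed; binned_stats and n_bins are
# hypothetical names.
import numpy as np

def binned_stats(x, y, n_bins=200):
    """Bin x into n_bins equal-width bins and return, for each non-empty bin,
    the bin centre, the mean of y and the standard deviation of y."""
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    edges = np.linspace(x.min(), x.max(), n_bins + 1)
    centres, means, stds = [], [], []
    for lo, hi in zip(edges[:-1], edges[1:]):
        mask = (x >= lo) & (x < hi)
        if mask.any():
            centres.append(0.5 * (lo + hi))
            means.append(y[mask].mean())
            stds.append(y[mask].std())
    return np.array(centres), np.array(means), np.array(stds)

# xave, yave, ystd = binned_stats(X, Y)
# ax.plot(xave, yave, color=c)
# ax.fill_between(xave, yave - ystd, yave + ystd, facecolor=c, alpha=0.3)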
could still contain # importable submodules (e.g., the non-package `os` module containing # the `os.path` submodule). In this case, these submodules are already # imported by this target module's pure-Python code. Since our import # scanner already detects these imports, these submodules need *NOT* be # reimported here. (Doing so would be harmless but inefficient.) if target_attr_names and isinstance(target_module, Package): # For the name of each attribute imported from this target package # into this source module... for target_submodule_partname in target_attr_names: #FIXME: Is this optimization *REALLY* an optimization or at all #necessary? The findNode() method called below should already #be heavily optimized, in which case this optimization here is #premature, senseless, and should be eliminated. # If this attribute is a previously imported submodule of this # target module, optimize this edge case. if target_module.is_submodule(target_submodule_partname): # Graph node for this submodule. target_submodule = target_module.get_submodule( target_submodule_partname) #FIXME: What? Shouldn't "target_submodule" *ALWAYS* be #non-None here? Assert this to be non-None instead. if target_submodule is not None: #FIXME: Why does duplication matter? List searches are #mildly expensive. # If this submodule has not already been added to the # list of submodules to be returned, do so. if target_submodule not in target_modules: self._updateReference( source_module, target_submodule, edge_data=edge_attr) target_modules.append(target_submodule) continue # Fully-qualified name of this submodule. target_submodule_name = ( target_module.identifier + '.' + target_submodule_partname) # Graph node of this submodule if previously imported or None. target_submodule = self.findNode(target_submodule_name) # If this submodule has not been imported, do so as if this # submodule were the only attribute listed by the "import" # clause of this import (e.g., as "from foo import bar" rather # than "from foo import car, far, bar"). if target_submodule is None: # Attempt to import this submodule. try: # Ignore the list of graph nodes returned by this # method. If both this submodule's package and this # submodule are importable, this method returns a # 2-element list whose second element is this # submodule's graph node. However, if this submodule's # package is importable but this submodule is not, # this submodule is either: # # * An ignorable global attribute defined at the top # level of this package's "__init__" submodule. In # this case, this method returns a 1-element list # without raising an exception. # * A non-ignorable unimportable submodule. In this # case, this method raises an "ImportError". # # While the first two cases are disambiguatable by the # length of this list, doing so would render this code # dependent on import_hook() details subject to change. # Instead, call findNode() to decide the truthiness. self.import_hook( target_module_partname, source_module, target_attr_names=[target_submodule_partname], level=level, edge_attr=edge_attr) # Graph node of this submodule imported by the prior # call if importable or None otherwise. target_submodule = self.findNode(target_submodule_name) # If this submodule does not exist, this *MUST* be an # ignorable global attribute defined at the top level # of this package's "__init__" submodule. if target_submodule is None: # Assert this to actually be the case. 
assert target_module.is_global_attr( target_submodule_partname), ( 'No global named {} in {}.__init__'.format( target_submodule_partname, target_module.identifier)) # Skip this safely ignorable importation to the # next attribute. See similar logic in the body of # _import_importable_package_submodules(). self.msg(4, '_safe_import_hook', 'ignoring imported non-module global', target_module.identifier, target_submodule_partname) continue # If this is a SWIG C extension, instruct PyInstaller # to freeze this extension under its unqualified rather # than qualified name (e.g., as "_csr" rather than # "scipy.sparse.sparsetools._csr"), permitting the # implicit relative import in its parent SWIG module to # successfully find this extension. if is_swig_import: # If a graph node with this name already exists, # avoid collisions by emitting an error instead. if self.findNode(target_submodule_partname): self.msg( 2, 'SWIG import error: %r basename %r ' 'already exists' % ( target_submodule_name, target_submodule_partname)) else: self.msg( 4, 'SWIG import renamed from %r to %r' % ( target_submodule_name, target_submodule_partname)) target_submodule.identifier = ( target_submodule_partname) # If this submodule is unimportable, add a MissingModule. except ImportError as msg: self.msg(2, "ImportError:", str(msg)) target_submodule = self.createNode( MissingModule, target_submodule_name) # Add this submodule to its package. target_module.add_submodule( target_submodule_partname, target_submodule) if target_submodule is not None: self._updateReference( target_module, target_submodule, edge_data=edge_attr) self._updateReference( source_module, target_submodule, edge_data=edge_attr) if target_submodule not in target_modules: target_modules.append(target_submodule) # Return the list of all target modules imported by this call. return target_modules def _scan_code( self, module, module_code_object, module_code_object_ast=None): """ Parse and add all import statements from the passed code object of the passed source module to this graph, recursively. **This method is at the root of all `ModuleGraph` recursion.** Recursion begins here and ends when all import statements in all code objects of all modules transitively imported by the source module passed to the first call to this method have been added to the graph. Specifically, this method: 1. If the passed `module_code_object_ast` parameter is non-`None`, parses all import statements from this object. 2. Else, parses all import statements from the passed `module_code_object` parameter. 1. For each such import statement: 1. Adds to this `ModuleGraph` instance: 1. Nodes for all target modules of these imports. 1. Directed edges from this source module to these target modules. 2. Recursively calls this method with these target modules. Parameters ---------- module : Node Graph node of the module to be parsed. module_code_object : PyCodeObject Code object providing this module's disassembled Python bytecode. Ignored unless `module_code_object_ast` is `None`. module_code_object_ast : optional[ast.AST] Optional abstract syntax tree (AST) of this module if any or `None` otherwise. Defaults to `None`, in which case the passed `module_code_object` is parsed instead. """ # For safety, guard against multiple scans of the same module by # resetting this module's list of deferred target imports. While # uncommon, this edge case can occur due to: # # * Dynamic package replacement via the replacePackage() function. 
For # example, the real "_xmlplus" package dynamically replaces itself # with the fake "xml" package into the "sys.modules" cache of all # currently loaded modules at runtime. module._deferred_imports = [] # Parse all imports from this module *BEFORE* adding these imports to # the graph. If an AST is provided, parse that rather than this # module's code object. if module_code_object_ast is not None: # Parse this module's AST for imports. self._scan_ast(module, module_code_object_ast) # Parse this module's code object for all relevant non-imports # (e.g., global variable declarations and undeclarations). self._scan_bytecode( module, module_code_object, is_scanning_imports=False) # Else, parse this module's code object for imports. else: self._scan_bytecode( module, module_code_object, is_scanning_imports=True) # Add all imports parsed above to this graph. self._process_imports(module) def _scan_ast(self, module, module_code_object_ast): """ Parse and add all import statements from the passed abstract syntax tree (AST) of the passed source module to this graph, non-recursively. Parameters ---------- module : Node Graph node of the module to be parsed. module_code_object_ast : ast.AST Abstract syntax tree (AST) of this module to be parsed. """ visitor = _Visitor(self, module) visitor.visit(module_code_object_ast) #FIXME: Optimize. Global attributes added by this method are tested by #other methods *ONLY* for packages, implying this method should scan and #handle opcodes pertaining to global attributes (e.g., #"STORE_NAME", "DELETE_GLOBAL") only if the passed "module" #object is an instance of the "Package" class. For all other module types, #these opcodes should simply be ignored. # #After doing so, the "Node._global_attr_names" attribute and all methods #using this attribute (e.g., Node.is_global()) should be moved from the #"Node" superclass to the "Package" subclass. def _scan_bytecode( self, module, module_code_object, is_scanning_imports): """ Parse and add all import statements from the passed code object of the passed source module to this graph, non-recursively. This method parses all reasonably parsable operations (i.e., operations that are both syntactically and semantically parsable _without_ requiring Turing-complete interpretation) directly or indirectly involving module importation from this code object. This includes: * `IMPORT_NAME`, denoting an import statement. Ignored unless the passed `is_scanning_imports` parameter is `True`. * `STORE_NAME` and `STORE_GLOBAL`, denoting
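# ------------------------------------------------------------------------------------
# Hedged illustration (not the real modulegraph _Visitor class): a minimal sketch of the
# AST-based import scanning that _scan_ast() delegates to. It records
# (module, attribute_names, level) tuples for "import x" and "from x import y"
# statements. Only the standard-library ast module is assumed; ImportCollector is a
# hypothetical name.
import ast

class ImportCollector(ast.NodeVisitor):
    def __init__(self):
        self.imports = []  # list of (module_name, attr_names, level)

    def visit_Import(self, node):
        # "import foo, bar as b" -> one entry per imported module, no attrs, level 0
        for alias in node.names:
            self.imports.append((alias.name, [], 0))
        self.generic_visit(node)

    def visit_ImportFrom(self, node):
        # "from foo import bar, baz" / "from . import x" -> module may be None
        self.imports.append((node.module, [a.name for a in node.names], node.level))
        self.generic_visit(node)

# collector = ImportCollector()
# collector.visit(ast.parse("from os import path\nimport sys"))
# collector.imports  -> [('os', ['path'], 0), ('sys', [], 0)]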
for workers. :vartype allowed_worker_sizes: str :ivar maximum_number_of_machines: Maximum number of VMs in the App Service Environment. :vartype maximum_number_of_machines: int :ivar vip_mappings: Description of IP SSL mapping for the App Service Environment. :vartype vip_mappings: list[~azure.mgmt.web.v2016_09_01.models.VirtualIPMapping] :ivar environment_capacities: Current total, used, and available worker capacities. :vartype environment_capacities: list[~azure.mgmt.web.v2016_09_01.models.StampCapacity] :ivar network_access_control_list: Access control list for controlling traffic to the App Service Environment. :vartype network_access_control_list: list[~azure.mgmt.web.v2016_09_01.models.NetworkAccessControlEntry] :ivar environment_is_healthy: True/false indicating whether the App Service Environment is healthy. :vartype environment_is_healthy: bool :ivar environment_status: Detailed message about with results of the last check of the App Service Environment. :vartype environment_status: str :ivar resource_group: Resource group of the App Service Environment. :vartype resource_group: str :ivar front_end_scale_factor: Scale factor for front-ends. :vartype front_end_scale_factor: int :ivar default_front_end_scale_factor: Default Scale Factor for FrontEnds. :vartype default_front_end_scale_factor: int :ivar api_management_account_id: API Management Account associated with the App Service Environment. :vartype api_management_account_id: str :ivar suspended: :code:`<code>true</code>` if the App Service Environment is suspended; otherwise, :code:`<code>false</code>`. The environment can be suspended, e.g. when the management endpoint is no longer available (most likely because NSG blocked the incoming traffic). :vartype suspended: bool :ivar dynamic_cache_enabled: True/false indicating whether the App Service Environment is suspended. The environment can be suspended e.g. when the management endpoint is no longer available (most likely because NSG blocked the incoming traffic). :vartype dynamic_cache_enabled: bool :ivar cluster_settings: Custom settings for changing the behavior of the App Service Environment. :vartype cluster_settings: list[~azure.mgmt.web.v2016_09_01.models.NameValuePair] :ivar user_whitelisted_ip_ranges: User added ip ranges to whitelist on ASE db. 
:vartype user_whitelisted_ip_ranges: list[str] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'location': {'required': True}, 'type': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'status': {'readonly': True}, 'database_edition': {'readonly': True}, 'database_service_objective': {'readonly': True}, 'upgrade_domains': {'readonly': True}, 'subscription_id': {'readonly': True}, 'last_action': {'readonly': True}, 'last_action_result': {'readonly': True}, 'allowed_multi_sizes': {'readonly': True}, 'allowed_worker_sizes': {'readonly': True}, 'maximum_number_of_machines': {'readonly': True}, 'vip_mappings': {'readonly': True}, 'environment_capacities': {'readonly': True}, 'environment_is_healthy': {'readonly': True}, 'environment_status': {'readonly': True}, 'resource_group': {'readonly': True}, 'default_front_end_scale_factor': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'kind': {'key': 'kind', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'name_properties_name': {'key': 'properties.name', 'type': 'str'}, 'location_properties_location': {'key': 'properties.location', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'vnet_name': {'key': 'properties.vnetName', 'type': 'str'}, 'vnet_resource_group_name': {'key': 'properties.vnetResourceGroupName', 'type': 'str'}, 'vnet_subnet_name': {'key': 'properties.vnetSubnetName', 'type': 'str'}, 'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'VirtualNetworkProfile'}, 'internal_load_balancing_mode': {'key': 'properties.internalLoadBalancingMode', 'type': 'str'}, 'multi_size': {'key': 'properties.multiSize', 'type': 'str'}, 'multi_role_count': {'key': 'properties.multiRoleCount', 'type': 'int'}, 'worker_pools': {'key': 'properties.workerPools', 'type': '[WorkerPool]'}, 'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'}, 'database_edition': {'key': 'properties.databaseEdition', 'type': 'str'}, 'database_service_objective': {'key': 'properties.databaseServiceObjective', 'type': 'str'}, 'upgrade_domains': {'key': 'properties.upgradeDomains', 'type': 'int'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, 'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'}, 'last_action': {'key': 'properties.lastAction', 'type': 'str'}, 'last_action_result': {'key': 'properties.lastActionResult', 'type': 'str'}, 'allowed_multi_sizes': {'key': 'properties.allowedMultiSizes', 'type': 'str'}, 'allowed_worker_sizes': {'key': 'properties.allowedWorkerSizes', 'type': 'str'}, 'maximum_number_of_machines': {'key': 'properties.maximumNumberOfMachines', 'type': 'int'}, 'vip_mappings': {'key': 'properties.vipMappings', 'type': '[VirtualIPMapping]'}, 'environment_capacities': {'key': 'properties.environmentCapacities', 'type': '[StampCapacity]'}, 'network_access_control_list': {'key': 'properties.networkAccessControlList', 'type': '[NetworkAccessControlEntry]'}, 'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'}, 'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'front_end_scale_factor': {'key': 'properties.frontEndScaleFactor', 'type': 'int'}, 'default_front_end_scale_factor': 
{'key': 'properties.defaultFrontEndScaleFactor', 'type': 'int'}, 'api_management_account_id': {'key': 'properties.apiManagementAccountId', 'type': 'str'}, 'suspended': {'key': 'properties.suspended', 'type': 'bool'}, 'dynamic_cache_enabled': {'key': 'properties.dynamicCacheEnabled', 'type': 'bool'}, 'cluster_settings': {'key': 'properties.clusterSettings', 'type': '[NameValuePair]'}, 'user_whitelisted_ip_ranges': {'key': 'properties.userWhitelistedIpRanges', 'type': '[str]'}, } def __init__( self, *, location: str, kind: Optional[str] = None, tags: Optional[Dict[str, str]] = None, name_properties_name: Optional[str] = None, location_properties_location: Optional[str] = None, vnet_name: Optional[str] = None, vnet_resource_group_name: Optional[str] = None, vnet_subnet_name: Optional[str] = None, virtual_network: Optional["VirtualNetworkProfile"] = None, internal_load_balancing_mode: Optional[Union[str, "InternalLoadBalancingMode"]] = None, multi_size: Optional[str] = None, multi_role_count: Optional[int] = None, worker_pools: Optional[List["WorkerPool"]] = None, ipssl_address_count: Optional[int] = None, dns_suffix: Optional[str] = None, network_access_control_list: Optional[List["NetworkAccessControlEntry"]] = None, front_end_scale_factor: Optional[int] = None, api_management_account_id: Optional[str] = None, suspended: Optional[bool] = None, dynamic_cache_enabled: Optional[bool] = None, cluster_settings: Optional[List["NameValuePair"]] = None, user_whitelisted_ip_ranges: Optional[List[str]] = None, **kwargs ): """ :keyword kind: Kind of resource. :paramtype kind: str :keyword location: Required. Resource Location. :paramtype location: str :keyword tags: A set of tags. Resource tags. :paramtype tags: dict[str, str] :keyword name_properties_name: Name of the App Service Environment. :paramtype name_properties_name: str :keyword location_properties_location: Location of the App Service Environment, e.g. "West US". :paramtype location_properties_location: str :keyword vnet_name: Name of the Virtual Network for the App Service Environment. :paramtype vnet_name: str :keyword vnet_resource_group_name: Resource group of the Virtual Network. :paramtype vnet_resource_group_name: str :keyword vnet_subnet_name: Subnet of the Virtual Network. :paramtype vnet_subnet_name: str :keyword virtual_network: Description of the Virtual Network. :paramtype virtual_network: ~azure.mgmt.web.v2016_09_01.models.VirtualNetworkProfile :keyword internal_load_balancing_mode: Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values include: "None", "Web", "Publishing". :paramtype internal_load_balancing_mode: str or ~azure.mgmt.web.v2016_09_01.models.InternalLoadBalancingMode :keyword multi_size: Front-end VM size, e.g. "Medium", "Large". :paramtype multi_size: str :keyword multi_role_count: Number of front-end instances. :paramtype multi_role_count: int :keyword worker_pools: Description of worker pools with worker size IDs, VM sizes, and number of workers in each pool. :paramtype worker_pools: list[~azure.mgmt.web.v2016_09_01.models.WorkerPool] :keyword ipssl_address_count: Number of IP SSL addresses reserved for the App Service Environment. :paramtype ipssl_address_count: int :keyword dns_suffix: DNS suffix of the App Service Environment. :paramtype dns_suffix: str :keyword network_access_control_list: Access control list for controlling traffic to the App Service Environment. 
:paramtype network_access_control_list: list[~azure.mgmt.web.v2016_09_01.models.NetworkAccessControlEntry] :keyword front_end_scale_factor: Scale factor for front-ends. :paramtype front_end_scale_factor: int :keyword api_management_account_id: API Management Account associated with the App Service Environment. :paramtype api_management_account_id: str :keyword suspended: :code:`<code>true</code>` if the App Service Environment is suspended; otherwise, :code:`<code>false</code>`. The environment can be suspended, e.g. when the management endpoint is no longer available (most likely because NSG blocked the incoming traffic). :paramtype suspended: bool :keyword dynamic_cache_enabled: True/false indicating whether the App Service Environment is suspended. The environment can be suspended e.g. when the management endpoint is no longer available (most likely because NSG blocked the incoming traffic). :paramtype dynamic_cache_enabled: bool :keyword cluster_settings: Custom settings for changing the behavior of the App Service Environment. :paramtype cluster_settings: list[~azure.mgmt.web.v2016_09_01.models.NameValuePair] :keyword user_whitelisted_ip_ranges: User added ip ranges to whitelist on ASE db. :paramtype user_whitelisted_ip_ranges: list[str] """ super(AppServiceEnvironmentResource, self).__init__(kind=kind, location=location, tags=tags, **kwargs) self.name_properties_name = name_properties_name self.location_properties_location = location_properties_location self.provisioning_state = None self.status = None self.vnet_name = vnet_name self.vnet_resource_group_name = vnet_resource_group_name self.vnet_subnet_name = vnet_subnet_name self.virtual_network = virtual_network self.internal_load_balancing_mode = internal_load_balancing_mode self.multi_size = multi_size self.multi_role_count = multi_role_count self.worker_pools = worker_pools self.ipssl_address_count = ipssl_address_count self.database_edition = None self.database_service_objective = None self.upgrade_domains = None self.subscription_id = None self.dns_suffix = dns_suffix self.last_action = None self.last_action_result = None self.allowed_multi_sizes = None self.allowed_worker_sizes = None self.maximum_number_of_machines = None self.vip_mappings = None self.environment_capacities = None self.network_access_control_list = network_access_control_list self.environment_is_healthy = None self.environment_status = None self.resource_group = None self.front_end_scale_factor = front_end_scale_factor self.default_front_end_scale_factor = None self.api_management_account_id = api_management_account_id self.suspended = suspended self.dynamic_cache_enabled = dynamic_cache_enabled self.cluster_settings = cluster_settings self.user_whitelisted_ip_ranges = user_whitelisted_ip_ranges class AppServicePlan(Resource): """App Service plan. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Resource Id. :vartype id: str :ivar name: Resource Name. :vartype name: str :ivar kind: Kind of resource. :vartype kind: str :ivar location: Required. Resource Location. :vartype location: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] :ivar sku: Description of a SKU for a scalable resource. :vartype sku: ~azure.mgmt.web.v2016_09_01.models.SkuDescription :ivar name_properties_name: Name for the App Service plan. 
:vartype name_properties_name: str :ivar worker_tier_name: Target worker tier assigned to the App Service plan. :vartype worker_tier_name: str :ivar status: App Service plan status. Possible values include: "Ready", "Pending", "Creating". :vartype status: str or ~azure.mgmt.web.v2016_09_01.models.StatusOptions :ivar subscription: App Service plan subscription. :vartype subscription: str :ivar admin_site_name: App Service plan administration site. :vartype admin_site_name: str :ivar hosting_environment_profile: Specification for the App Service Environment to use for the App Service plan. :vartype hosting_environment_profile: ~azure.mgmt.web.v2016_09_01.models.HostingEnvironmentProfile :ivar maximum_number_of_workers: Maximum number of instances that can be assigned to this App Service plan. :vartype maximum_number_of_workers: int :ivar geo_region: Geographical location for the App Service plan. :vartype geo_region: str :ivar per_site_scaling: If :code:`<code>true</code>`, apps
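# ------------------------------------------------------------------------------------
# Hedged illustration (not part of the generated SDK file): a minimal usage sketch of the
# keyword-only constructor defined above. The import path is inferred from the docstring
# references (azure.mgmt.web.v2016_09_01.models) and the field values are made up;
# read-only properties such as provisioning_state are populated by the service only.
from azure.mgmt.web.v2016_09_01.models import AppServiceEnvironmentResource

ase = AppServiceEnvironmentResource(
    location="West US",                  # required Resource Location
    name_properties_name="my-ase",       # name of the App Service Environment
    vnet_name="my-vnet",
    vnet_subnet_name="ase-subnet",
    internal_load_balancing_mode="Web",  # "None", "Web" or "Publishing"
    front_end_scale_factor=15,
    tags={"env": "dev"},
)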
the file is: n_scales xsec_scale_central xsec_scale1 ... n_pdf xsec_pdf0 xsec_pdf1 ....""" scales=[] pdfs=[] for i,evt_file in enumerate(evt_files): path, evt=os.path.split(evt_file) with open(pjoin(self.me_dir, 'SubProcesses', path, 'scale_pdf_dependence.dat'),'r') as f: data_line=f.readline() if "scale variations:" in data_line: for j,scale in enumerate(self.run_card['dynamical_scale_choice']): data_line = f.readline().split() scales_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()] try: scales[j] = [a + b for a, b in zip(scales[j], scales_this)] except IndexError: scales+=[scales_this] data_line=f.readline() if "pdf variations:" in data_line: for j,pdf in enumerate(self.run_card['lhaid']): data_line = f.readline().split() pdfs_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()] try: pdfs[j] = [a + b for a, b in zip(pdfs[j], pdfs_this)] except IndexError: pdfs+=[pdfs_this] # get the scale uncertainty in percent scale_info=[] for j,scale in enumerate(scales): s_cen=scale[0] if s_cen != 0.0 and self.run_card['reweight_scale'][j]: # max and min of the full envelope s_max=(max(scale)/s_cen-1)*100 s_min=(1-min(scale)/s_cen)*100 # ren and fac scale dependence added in quadrature ren_var=[] fac_var=[] for i in range(len(self.run_card['rw_rscale'])): ren_var.append(scale[i]-s_cen) # central fac scale for i in range(len(self.run_card['rw_fscale'])): fac_var.append(scale[i*len(self.run_card['rw_rscale'])]-s_cen) # central ren scale s_max_q=((s_cen+math.sqrt(math.pow(max(ren_var),2)+math.pow(max(fac_var),2)))/s_cen-1)*100 s_min_q=(1-(s_cen-math.sqrt(math.pow(min(ren_var),2)+math.pow(min(fac_var),2)))/s_cen)*100 s_size=len(scale) else: s_max=0.0 s_min=0.0 s_max_q=0.0 s_min_q=0.0 s_size=len(scale) scale_info.append({'cen':s_cen, 'min':s_min, 'max':s_max, \ 'min_q':s_min_q, 'max_q':s_max_q, 'size':s_size, \ 'label':self.run_card['dynamical_scale_choice'][j], \ 'unc':self.run_card['reweight_scale'][j]}) # check if we can use LHAPDF to compute the PDF uncertainty if any(self.run_card['reweight_pdf']): use_lhapdf=False lhapdf_libdir=subprocess.Popen([self.options['lhapdf'],'--libdir'],\ stdout=subprocess.PIPE).stdout.read().strip() try: candidates=[dirname for dirname in os.listdir(lhapdf_libdir) \ if os.path.isdir(pjoin(lhapdf_libdir,dirname))] except OSError: candidates=[] for candidate in candidates: if os.path.isfile(pjoin(lhapdf_libdir,candidate,'site-packages','lhapdf.so')): sys.path.insert(0,pjoin(lhapdf_libdir,candidate,'site-packages')) try: import lhapdf use_lhapdf=True break except ImportError: sys.path.pop(0) continue if not use_lhapdf: try: candidates=[dirname for dirname in os.listdir(lhapdf_libdir+'64') \ if os.path.isdir(pjoin(lhapdf_libdir+'64',dirname))] except OSError: candidates=[] for candidate in candidates: if os.path.isfile(pjoin(lhapdf_libdir+'64',candidate,'site-packages','lhapdf.so')): sys.path.insert(0,pjoin(lhapdf_libdir+'64',candidate,'site-packages')) try: import lhapdf use_lhapdf=True break except ImportError: sys.path.pop(0) continue if not use_lhapdf: try: import lhapdf use_lhapdf=True except ImportError: logger.warning("Failed to access python version of LHAPDF: "\ "cannot compute PDF uncertainty from the "\ "weights in the events. The weights in the LHE " \ "event files will still cover all PDF set members, "\ "but there will be no PDF uncertainty printed in the run summary. 
\n "\ "If the python interface to LHAPDF is available on your system, try "\ "adding its location to the PYTHONPATH environment variable and the"\ "LHAPDF library location to LD_LIBRARY_PATH (linux) or DYLD_LIBRARY_PATH (mac os x).") use_lhapdf=False # turn off lhapdf printing any messages if any(self.run_card['reweight_pdf']) and use_lhapdf: lhapdf.setVerbosity(0) pdf_info=[] for j,pdfset in enumerate(pdfs): p_cen=pdfset[0] if p_cen != 0.0 and self.run_card['reweight_pdf'][j]: if use_lhapdf: pdfsetname=self.run_card['lhapdfsetname'][j] try: p=lhapdf.getPDFSet(pdfsetname) ep=p.uncertainty(pdfset,-1) p_cen=ep.central p_min=abs(ep.errminus/p_cen)*100 p_max=abs(ep.errplus/p_cen)*100 p_type=p.errorType p_size=p.size p_conf=p.errorConfLevel except: logger.warning("Could not access LHAPDF to compute uncertainties for %s" % pdfsetname) p_min=0.0 p_max=0.0 p_type='unknown' p_conf='unknown' p_size=len(pdfset) else: p_min=0.0 p_max=0.0 p_type='unknown' p_conf='unknown' p_size=len(pdfset) pdfsetname=self.run_card['lhaid'][j] else: p_min=0.0 p_max=0.0 p_type='none' p_conf='unknown' p_size=len(pdfset) pdfsetname=self.run_card['lhaid'][j] pdf_info.append({'cen':p_cen, 'min':p_min, 'max':p_max, \ 'unc':p_type, 'name':pdfsetname, 'size':p_size, \ 'label':self.run_card['lhaid'][j], 'conf':p_conf}) scale_pdf_info=[scale_info,pdf_info] return scale_pdf_info def wait_for_complete(self, run_type): """this function waits for jobs on cluster to complete their run.""" starttime = time.time() #logger.info(' Waiting for submitted jobs to complete') update_status = lambda i, r, f: self.update_status((i, r, f, run_type), starttime=starttime, level='parton', update_results=True) try: self.cluster.wait(self.me_dir, update_status) except: self.cluster.remove() raise def run_all(self, job_dict, arg_list, run_type='monitor', split_jobs = False): """runs the jobs in job_dict (organized as folder: [job_list]), with arguments args""" self.ijob = 0 if run_type != 'shower': self.njobs = sum(len(jobs) for jobs in job_dict.values()) * len(arg_list) for args in arg_list: for Pdir, jobs in job_dict.items(): for job in jobs: self.run_exe(job, args, run_type, cwd=pjoin(self.me_dir, 'SubProcesses', Pdir) ) if self.cluster_mode == 2: time.sleep(1) # security to allow all jobs to be launched else: self.njobs = len(arg_list) for args in arg_list: [(cwd, exe)] = job_dict.items() self.run_exe(exe, args, run_type, cwd) self.wait_for_complete(run_type) def check_event_files(self,jobs): """check the integrity of the event files after splitting, and resubmit those which are not nicely terminated""" jobs_to_resubmit = [] for job in jobs: last_line = '' try: last_line = subprocess.Popen( ['tail', '-n1', pjoin(job['dirname'], 'events.lhe')], \ stdout = subprocess.PIPE).stdout.read().strip() except IOError: pass if last_line != "</LesHouchesEvents>": jobs_to_resubmit.append(job) self.njobs = 0 if jobs_to_resubmit: run_type = 'Resubmitting broken jobs' logger.info('Some event files are broken, corresponding jobs will be resubmitted.') for job in jobs_to_resubmit: logger.debug('Resubmitting ' + job['dirname'] + '\n') self.run_all_jobs(jobs_to_resubmit,2,fixed_order=False) def find_jobs_to_split(self, pdir, job, arg): """looks into the nevents_unweighed_splitted file to check how many split jobs are needed for this (pdir, job). 
arg is F, B or V""" # find the number of the integration channel splittings = [] ajob = open(pjoin(self.me_dir, 'SubProcesses', pdir, job)).read() pattern = re.compile('for i in (\d+) ; do') match = re.search(pattern, ajob) channel = match.groups()[0] # then open the nevents_unweighted_splitted file and look for the # number of splittings to be done nevents_file = open(pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted_splitted')).read() # This skips the channels with zero events, because they are # not of the form GFXX_YY, but simply GFXX pattern = re.compile(r"%s_(\d+)/events.lhe" % \ pjoin(pdir, 'G%s%s' % (arg,channel))) matches = re.findall(pattern, nevents_file) for m in matches: splittings.append(m) return splittings def run_exe(self, exe, args, run_type, cwd=None): """this basic function launch locally/on cluster exe with args as argument. """ # first test that exe exists: execpath = None if cwd and os.path.exists(pjoin(cwd, exe)): execpath = pjoin(cwd, exe) elif not cwd and os.path.exists(exe): execpath = exe else: raise aMCatNLOError('Cannot find executable %s in %s' \ % (exe, os.getcwd())) # check that the executable has exec permissions if self.cluster_mode == 1 and not os.access(execpath, os.X_OK): subprocess.call(['chmod', '+x', exe], cwd=cwd) # finally run it if self.cluster_mode == 0: #this is for the serial run misc.call(['./'+exe] + args, cwd=cwd) self.ijob += 1 self.update_status((max([self.njobs - self.ijob - 1, 0]), min([1, self.njobs - self.ijob]), self.ijob, run_type), level='parton') #this is for the cluster/multicore run elif 'reweight' in exe: # a reweight run # Find the correct PDF input file input_files, output_files = [], [] pdfinput = self.get_pdf_input_filename() if os.path.exists(pdfinput): input_files.append(pdfinput) input_files.append(pjoin(os.path.dirname(exe), os.path.pardir, 'reweight_xsec_events')) input_files.append(pjoin(cwd, os.path.pardir, 'leshouche_info.dat')) input_files.append(args[0]) output_files.append('%s.rwgt' % os.path.basename(args[0])) output_files.append('reweight_xsec_events.output') output_files.append('scale_pdf_dependence.dat') return self.cluster.submit2(exe, args, cwd=cwd, input_files=input_files, output_files=output_files, required_output=output_files) elif 'ajob' in exe: # the 'standard' amcatnlo job # check if args is a list of string if type(args[0]) == str: input_files, output_files, required_output, args = self.getIO_ajob(exe,cwd,args) #submitting self.cluster.submit2(exe, args, cwd=cwd, input_files=input_files, output_files=output_files, required_output=required_output) # # keep track of folders and arguments for splitted evt gen # subfolder=output_files[-1].split('/')[0] # if len(args) == 4 and '_' in subfolder: # self.split_folders[pjoin(cwd,subfolder)] = [exe] + args elif 'shower' in exe: # a shower job # args are [shower, output(HEP or TOP), run_name] # cwd is the shower rundir, where the executable are found input_files, output_files = [], [] shower = args[0] # the input files if shower == 'PYTHIA8': input_files.append(pjoin(cwd, 'Pythia8.exe')) input_files.append(pjoin(cwd, 'Pythia8.cmd')) if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): input_files.append(pjoin(cwd, 'config.sh')) input_files.append(pjoin(self.options['pythia8_path'], 'xmldoc')) else: input_files.append(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc')) else: input_files.append(pjoin(cwd, 'MCATNLO_%s_EXE' % shower)) input_files.append(pjoin(cwd, 'MCATNLO_%s_input' % shower)) if shower == 'HERWIGPP': if 
os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')): input_files.append(pjoin(cwd, 'Herwig++')) if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')): input_files.append(pjoin(cwd, 'Herwig')) input_files.append(pjoin(cwd, 'HepMCFortran.so')) if len(args) == 3: if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')): input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')) elif os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')): input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')) else: raise aMCatNLOError, 'Event file not present in %s' % \ pjoin(self.me_dir, 'Events', self.run_name) else: input_files.append(pjoin(cwd, 'events_%s.lhe' % args[3])) # the output files if len(args) == 3: output_files.append('mcatnlo_run.log') else: output_files.append('mcatnlo_run_%s.log' % args[3]) if args[1] == 'HEP': if len(args) == 3: fname = 'events' else: fname = 'events_%s' % args[3] if shower in ['PYTHIA8', 'HERWIGPP']: output_files.append(fname + '.hepmc.gz') else: output_files.append(fname + '.hep.gz') elif args[1] == 'TOP' or args[1] == 'HWU': if len(args) == 3: fname = 'histfile' else: fname = 'histfile_%s' % args[3] output_files.append(fname + '.tar') else: raise aMCatNLOError, 'Not a valid output argument for shower job : %d' % args[1] #submitting self.cluster.submit2(exe, args, cwd=cwd, input_files=input_files, output_files=output_files) else: return self.cluster.submit(exe, args, cwd=cwd) def getIO_ajob(self,exe,cwd, args): # use local disk if possible => need to stands what are the # input/output files output_files = [] required_output = [] input_files = [pjoin(self.me_dir, 'SubProcesses', 'randinit'), pjoin(cwd, 'symfact.dat'), pjoin(cwd, 'iproc.dat'), pjoin(cwd, 'initial_states_map.dat'), pjoin(cwd, 'configs_and_props_info.dat'), pjoin(cwd, 'leshouche_info.dat'), pjoin(cwd, 'FKS_params.dat')] # For GoSam interface, we must copy the SLHA card as well if os.path.exists(pjoin(self.me_dir,'OLP_virtuals','gosam.rc')): input_files.append(pjoin(self.me_dir, 'Cards', 'param_card.dat')) if os.path.exists(pjoin(cwd,'nevents.tar')): input_files.append(pjoin(cwd,'nevents.tar')) if os.path.exists(pjoin(self.me_dir,'SubProcesses','OLE_order.olc')): input_files.append(pjoin(cwd, 'OLE_order.olc'))
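# --------------------------------------------------------------------------
# Hedged sketch (not taken from the sources above): the integrity check in
# check_event_files relies on a finished LHE file ending with the closing
# "</LesHouchesEvents>" tag, and find_jobs_to_split collects split indices with
# a regex over nevents_unweighted_splitted.  The helpers below restate both
# ideas in a self-contained form; the function names and paths are my own
# illustrative assumptions, not the framework's API.
import os
import re


def lhe_file_is_complete(path):
    """Return True if the last non-blank line of an LHE file is the closing tag."""
    try:
        with open(path, 'rb') as handle:
            # read only the tail of the file; LHE event files can be very large
            handle.seek(0, os.SEEK_END)
            size = handle.tell()
            handle.seek(max(0, size - 4096))
            tail = handle.read().decode('utf-8', errors='replace')
    except IOError:
        return False
    lines = [line.strip() for line in tail.splitlines() if line.strip()]
    return bool(lines) and lines[-1] == "</LesHouchesEvents>"


def split_indices(nevents_file_content, pdir, arg, channel):
    """Collect the split indices YY of directories named G<arg><channel>_YY."""
    pattern = re.compile(r"%s_(\d+)/events.lhe" %
                         os.path.join(pdir, 'G%s%s' % (arg, channel)))
    return re.findall(pattern, nevents_file_content)


# example usage (paths are illustrative):
# broken = [j for j in jobs
#           if not lhe_file_is_complete(os.path.join(j['dirname'], 'events.lhe'))]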
from tfumap.load_datasets import load_CIFAR10, load_MNIST, load_FMNIST, mask_labels import tensorflow as tf from tfumap.paths import MODEL_DIR import numpy as np pretrained_networks = { "cifar10_old": { "augmented": { 4: "cifar10_4____2020_08_09_22_16_45_780732_baseline_augmented", # 15 16: "cifar10_16____2020_08_09_22_43_34_001017_baseline_augmented", # 29 64: "cifar10_64____2020_08_09_22_16_13_299376_baseline_augmented", # 47 256: "cifar10_256____2020_08_09_22_05_58_228942_baseline_augmented", # 71 1024: "cifar10_1024____2020_08_09_23_20_57_619002_baseline_augmented", # 84 "full": "cifar10_full____2020_08_09_22_03_51_910408_baseline_augmented", # ~91.5 }, "not_augmented": { 4: "cifar10_4____2020_08_09_22_13_28_904367_baseline", # 0.1542 16: "cifar10_16____2020_08_09_21_58_11_099843_baseline", # 0.2335 64: "cifar10_64____2020_08_09_22_14_33_994011_baseline", # 0.3430 256: "cifar10_128____2020_08_09_21_58_00_869329_baseline", # 0.4693 1024: "cifar10_1024____2020_08_09_22_14_06_923244_baseline", # 0.7864 "full": "cifar10_full____2020_08_09_21_54_03_152503_baseline", # 0.8923 }, }, "cifar10": { "augmented": { 4: "cifar10_4____2020_08_11_19_25_58_939055_baseline_augmented", # 15 # 26 16: "cifar10_16____2020_08_11_19_25_49_190428_baseline_augmented", # 29 # 40 64: "cifar10_64____2020_08_11_19_25_41_266466_baseline_augmented", # 47 # ~60 256: "cifar10_256____2020_08_11_19_25_33_546350_baseline_augmented", # 71 # 76 1024: "cifar10_1024____2020_08_11_19_25_33_541963_baseline_augmented", # 84 # 86 "full": "cifar10_full____2020_08_11_19_25_33_543821_baseline_augmented", # ~91.5 # ~93 }, "not_augmented": { 4: "cifar10_4____2020_08_17_16_02_48_330135_baseline", # 0.1542 # 21 16: "cifar10_16____2020_08_17_15_08_23_820108_baseline", # 0.2335 # 30 64: "cifar10_64____2020_08_17_15_08_38_407886_baseline", # 0.3430 # 50 256: "cifar10_256____2020_08_17_22_16_16_108071_baseline", # 0.4693 # 72 1024: "cifar10_1024____2020_08_17_15_08_08_912644_baseline", # 0.7864 # 84 "full": "cifar10_full____2020_08_17_15_08_15_778694_baseline", # 0.8923 # 90 }, "umap_augmented_learned": { 4: "cifar10_0.0_4____2020_08_19_00_40_15_425455_umap_augmented", # 16: "cifar10_0.0_16____2020_08_19_00_40_13_037112_umap_augmented", 64: "cifar10_0.0_64____2020_08_19_00_40_13_032397_umap_augmented", 256: "cifar10_0.0_256____2020_08_18_16_16_47_694512_umap_augmented", 1024: "cifar10_0.0_1024____2020_08_19_10_25_26_973224_umap_augmented", "full": "cifar10_0.0_full____2020_08_19_00_40_18_936212_umap_augmented", }, "umap_not_augmented": { 4: "cifar10_0.0_4____2020_08_19_14_35_43_127626_umap_augmented", # 16: "cifar10_0.0_16____2020_08_19_14_35_43_867336_umap_augmented", 64: "cifar10_0.0_64____2020_08_19_14_35_43_736036_umap_augmented", 256: "cifar10_0.0_256____2020_08_19_14_38_31_105228_umap_augmented", 1024: "cifar10_0.0_1024____2020_08_19_14_35_43_823739_umap_augmented", "full": "cifar10_0.0_full____2020_08_19_14_32_51_275942_umap_augmented", }, "umap_not_augmented_thresh": { 4: "cifar10_0.8_4____2020_08_19_23_00_09_641532_umap_augmented", # 16: "cifar10_0.8_16____2020_08_19_23_00_00_125286_umap_augmented", 64: "cifar10_0.8_64____2020_08_19_23_00_54_552899_umap_augmented", 256: "cifar10_0.8_256____2020_08_19_23_00_56_468894_umap_augmented", 1024: "cifar10_0.8_1024____2020_08_19_23_00_59_934762_umap_augmented", "full": "cifar10_0.8_full____2020_08_19_23_01_03_044142_umap_augmented", }, "umap_euclidean_augmented": { # umap_euclidean_augmented_no_thresh 4: "cifar10_0.0_4____2020_08_20_10_49_23_565699_umap_augmented", # 16: 
"cifar10_0.0_16____2020_08_20_10_52_39_313456_umap_augmented", 64: "cifar10_0.0_64____2020_08_20_10_52_40_783860_umap_augmented", 256: "cifar10_0.0_256____2020_08_20_10_52_47_615557_umap_augmented", 1024: "cifar10_0.0_1024____2020_08_20_10_52_58_310917_umap_augmented", "full": "cifar10_0.0_full____2020_08_20_10_53_00_819968_umap_augmented", }, "umap_not_augmented_linear_thresh": { 4: "cifar10_0.8_4____2020_08_22_22_47_43_598023_umap_augmented", # 16: "cifar10_0.8_16____2020_08_22_22_47_57_967494_umap_augmented", 64: "cifar10_0.8_64____2020_08_22_22_47_27_952365_umap_augmented", 256: "cifar10_0.8_256____2020_08_22_22_48_38_890043_umap_augmented", 1024: "cifar10_0.8_1024____2020_08_22_22_49_43_660652_umap_augmented", "full": "cifar10_0.8_full____2020_08_22_22_49_37_683086_umap_augmented", }, "umap_euclidean": { 4: "cifar10_0.0_4____2020_08_24_10_10_03_033874_umap_augmented", 16: "cifar10_0.0_16____2020_08_24_00_26_41_868150_umap_augmented", 64: "cifar10_0.0_64____2020_08_24_00_26_53_791994_umap_augmented", 256: "cifar10_0.0_256____2020_08_24_00_22_53_202346_umap_augmented", 1024: "cifar10_0.0_1024____2020_08_24_00_22_53_212673_umap_augmented", "full": "cifar10_0.0_full____2020_08_23_23_52_33_359986_umap_augmented", }, "umap_learned": { 4: "cifar10_0.0_4____2020_08_19_14_35_43_127626_umap_augmented", # 16: "cifar10_0.0_16____2020_08_19_14_35_43_867336_umap_augmented", 64: "cifar10_0.0_64____2020_08_19_14_35_43_736036_umap_augmented", 256: "cifar10_0.0_256____2020_08_19_14_38_31_105228_umap_augmented", 1024: "cifar10_0.0_1024____2020_08_19_14_35_43_823739_umap_augmented", "full": "cifar10_0.0_full____2020_08_19_14_32_51_275942_umap_augmented", }, }, "mnist": { "not_augmented": { 4: "mnist_4____2020_08_23_13_59_31_357892_baseline", 16: "mnist_16____2020_08_23_14_13_03_306576_baseline", 64: "mnist_64____2020_08_23_14_13_19_397319_baseline", 256: "mnist_256____2020_08_23_14_12_28_828611_baseline", 1024: "mnist_1024____2020_08_23_14_12_00_839816_baseline", "full": "mnist_full____2020_08_23_14_02_35_917340_baseline", }, "augmented": { 4: "mnist_4____2020_08_26_22_34_26_172040_baseline_augmented", 16: "mnist_16____2020_08_26_22_36_42_823740_baseline_augmented", 64: "mnist_64____2020_08_26_22_37_03_013806_baseline_augmented", 256: "mnist_256____2020_08_26_22_38_00_695064_baseline_augmented", 1024: "mnist_1024____2020_08_26_22_38_22_879325_baseline_augmented", "full": "mnist_full____2020_08_26_22_34_57_589833_baseline_augmented", }, "umap_euclidean": { 4: "mnist_0.0_4____2020_08_23_19_39_30_768509_umap_augmented", 16: "mnist_0.0_16____2020_08_23_19_27_31_722774_umap_augmented", 64: "mnist_0.0_64____2020_08_23_18_32_38_592348_umap_augmented", 256: "mnist_0.0_256____2020_08_23_19_39_57_288829_umap_augmented", 1024: "mnist_0.0_1024____2020_08_23_19_44_01_747431_umap_augmented", # "full": "mnist_0.0_full____2020_08_23_23_07_06_598185_umap_augmented", "full": "mnist_0.0_full____2020_08_23_23_11_15_364937_umap_augmented", }, "umap_learned": { 4: "mnist_0.0_4____2020_08_24_13_43_38_697668_umap_augmented", 16: "mnist_0.0_16____2020_08_24_16_51_10_703116_umap_augmented", 64: "mnist_0.0_64____2020_08_24_16_51_16_969542_umap_augmented", 256: "mnist_0.0_256____2020_08_24_16_53_11_404946_umap_augmented", 1024: "mnist_0.0_1024____2020_08_24_16_53_15_376183_umap_augmented", "full": "mnist_0.0_full____2020_08_24_13_43_38_497837_umap_augmented", }, "umap_euclidean_augmented": { 4: "mnist_0.0_4____2020_08_28_01_19_15_530909_umap_augmented", 16: "mnist_0.0_16____2020_08_28_01_22_00_266602_umap_augmented", 64: 
"mnist_0.0_64____2020_08_28_01_22_22_251679_umap_augmented", 256: "mnist_0.0_256____2020_08_28_01_22_37_322969_umap_augmented", 1024: "mnist_0.0_1024____2020_08_28_10_21_10_652408_umap_augmented", "full": "mnist_0.0_full____2020_08_28_10_21_10_309737_umap_augmented", }, "umap_augmented_learned": { 4: "mnist_0.0_4____2020_08_28_22_36_55_334573_umap_augmented", 16: "mnist_0.0_16____2020_08_28_22_36_36_245588_umap_augmented", 64: "mnist_0.0_64____2020_08_28_10_58_23_001653_umap_augmented", 256: "mnist_0.0_256____2020_08_28_10_58_44_499275_umap_augmented", 1024: "mnist_0.0_1024____2020_08_28_11_00_20_544491_umap_augmented", "full": "mnist_0.0_full____2020_08_28_11_00_23_221668_umap_augmented", }, }, "fmnist": { "not_augmented": { 4: "fmnist_4____2020_08_23_14_15_38_194490_baseline", 16: "fmnist_16____2020_08_23_14_15_50_074976_baseline", 64: "fmnist_64____2020_08_23_14_16_00_145880_baseline", 256: "fmnist_256____2020_08_23_14_14_27_904250_baseline", 1024: "fmnist_1024____2020_08_23_14_13_39_538728_baseline", "full": "fmnist_full____2020_08_23_14_06_13_546999_baseline", }, "augmented": { 4: "fmnist_4____2020_08_25_17_18_57_856259_baseline_augmented", 16: "fmnist_16____2020_08_25_17_19_58_221943_baseline_augmented", 64: "fmnist_64____2020_08_25_17_20_33_647542_baseline_augmented", 256: "fmnist_256____2020_08_25_17_20_55_354044_baseline_augmented", 1024: "fmnist_1024____2020_08_25_17_21_21_486291_baseline_augmented", "full": "fmnist_full____2020_08_25_17_21_42_014099_baseline_augmented", }, "placeholder": {4: "", 16: "", 64: "", 256: "", 1024: "", "full": ""}, "umap_over_z": { 4: "fmnist_0.0_4____2020_08_27_11_30_38_602000_umap_augmented", 16: "fmnist_0.0_16____2020_08_27_11_30_41_024752_umap_augmented", 64: "fmnist_0.0_64____2020_08_27_11_46_44_906423_umap_augmented", 256: "fmnist_0.0_256____2020_08_27_11_47_02_912498_umap_augmented", 1024: "", "full": "", }, "umap_augmented_learned": { 4: "fmnist_0.0_4____2020_08_25_22_52_13_661088_umap_augmented", 16: "fmnist_0.0_16____2020_08_25_22_53_12_075808_umap_augmented", 64: "fmnist_0.0_64____2020_08_25_22_58_52_822672_umap_augmented", 256: "fmnist_0.0_256____2020_08_25_22_59_00_936495_umap_augmented", 1024: "fmnist_0.0_1024____2020_08_25_22_59_15_453823_umap_augmented", "full": "fmnist_0.0_full____2020_08_25_22_59_11_829778_umap_augmented", }, "umap_euclidean": { 4: "fmnist_0.0_4____2020_08_23_18_48_03_409056_umap_augmented", 16: "fmnist_0.0_16____2020_08_23_21_25_30_890380_umap_augmented", 64: "fmnist_0.0_64____2020_08_23_19_43_20_063919_umap_augmented", 256: "fmnist_0.0_256____2020_08_23_19_44_36_506473_umap_augmented", 1024: "fmnist_0.0_1024____2020_08_23_21_25_43_287069_umap_augmented", "full": "fmnist_0.0_full____2020_08_23_23_13_31_899132_umap_augmented", }, "umap_learned": { 4: "fmnist_0.0_4____2020_08_24_10_19_02_171374_umap_augmented", 16: "fmnist_0.0_16____2020_08_24_10_19_11_697170_umap_augmented", 64: "fmnist_0.0_64____2020_08_24_10_19_33_327157_umap_augmented", 256: "fmnist_0.0_256____2020_08_24_10_19_51_978912_umap_augmented", 1024: "fmnist_0.0_1024____2020_08_24_10_20_06_630456_umap_augmented", "full": "fmnist_0.0_full____2020_08_24_10_20_11_972145_umap_augmented", }, "umap_intersection": { 4: "fmnist_0.0_4____2020_08_24_23_43_25_574078_umap_augmented", 16: "fmnist_0.0_16____2020_08_24_23_43_35_567328_umap_augmented", 64: "fmnist_0.0_64____2020_08_24_23_43_35_567450_umap_augmented", 256: "fmnist_0.0_256____2020_08_24_23_43_45_557361_umap_augmented", 1024: "fmnist_0.0_1024____2020_08_24_23_43_45_643845_umap_augmented", "full": 
"fmnist_0.0_full____2020_08_24_23_48_54_578235_umap_augmented", }, "umap_euclidean_augmented": { 4: "fmnist_0.0_4____2020_08_26_11_16_46_042019_umap_augmented", 16: "fmnist_0.0_16____2020_08_26_13_30_25_749568_umap_augmented", 64: "fmnist_0.0_64____2020_08_26_13_30_25_380156_umap_augmented", 256: "fmnist_0.0_256____2020_08_26_11_21_26_903869_umap_augmented", 1024: "fmnist_0.0_1024____2020_08_26_11_21_26_883542_umap_augmented", "full": "fmnist_0.0_full____2020_08_26_13_30_25_074505_umap_augmented", }, }, } def load_pretrained_weights(dataset, augmented, labels_per_class, encoder, classifier): aug_str = "augmented" if augmented else "not_augmented" pretrained_weights_loc = pretrained_networks[dataset][aug_str][labels_per_class] load_folder = ( MODEL_DIR / "semisupervised-keras" / dataset / str(labels_per_class) / pretrained_weights_loc ) classifier.load_weights((load_folder / "classifier").as_posix()) encoder.load_weights((load_folder / "encoder").as_posix()) return encoder, classifier def load_dataset(dataset, labels_per_class): if dataset == "cifar10": X_train, X_test, X_valid, Y_train, Y_test, Y_valid = load_CIFAR10(flatten=False) num_classes = 10 dims = (32, 32, 3) elif dataset == "mnist": X_train, X_test, X_valid, Y_train, Y_test, Y_valid = load_MNIST(flatten=False) num_classes = 10 dims = (28, 28, 1) elif dataset == "fmnist": X_train, X_test, X_valid, Y_train, Y_test, Y_valid = load_FMNIST(flatten=False) num_classes = 10 dims = (28, 28, 1) # get labeled data if labels_per_class == "full": X_labeled = X_train Y_masked = Y_labeled = Y_train else: X_labeled, Y_labeled, Y_masked = mask_labels( X_train, Y_train, labels_per_class=labels_per_class ) # create one hot representation Y_valid_one_hot = tf.keras.backend.one_hot(Y_valid, num_classes) Y_labeled_one_hot = tf.keras.backend.one_hot(Y_labeled, num_classes) return ( X_train, X_test, X_labeled, Y_labeled, Y_masked, X_valid, Y_train, Y_test, Y_valid, Y_valid_one_hot, Y_labeled_one_hot, num_classes, dims, ) def load_architecture(dataset, n_latent_dims, extend_embedder=True): if dataset == "cifar10": return load_cifar10_CNN13(n_latent_dims, extend_embedder) elif dataset == "mnist": return load_mnist_CNN(n_latent_dims, extend_embedder) elif dataset == "fmnist": return load_mnist_CNN(n_latent_dims, extend_embedder) from tensorflow.keras import datasets, layers, models from tensorflow_addons.layers import WeightNormalization def load_mnist_CNN( n_latent_dims, extend_embedder=True, dims=(28, 28, 1), num_classes=10, lr_alpha=0.1, dropout_rate=0.5, ): """ references for network: - https://github.com/benathi/fastswa-semi-sup/blob/master/mean_teacher/architectures.py - https://github.com/vikasverma1077/ICT/blob/master/networks/lenet.py - https://github.com/brain-research/realistic-ssl-evaluation """ def conv_block(filts, name, kernel_size=(3, 3), padding="same", **kwargs): return WeightNormalization( layers.Conv2D( filts, kernel_size, activation=None, padding=padding, **kwargs ), name="conv" + name, ) encoder = models.Sequential() encoder.add(tf.keras.Input(shape=dims)) ### conv1a name = "1a" encoder.add(conv_block(name=name, filts=128, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) ### conv1b name = "1b" encoder.add(conv_block(name=name, filts=128, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) ### conv1c name = "1c" 
encoder.add(conv_block(name=name, filts=128, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) # max pooling encoder.add( layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding="valid", name="mp1") ) # dropout encoder.add(layers.Dropout(dropout_rate, name="drop1")) ### conv2a name = "2a" encoder.add(conv_block(name=name, filts=256, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha)) ### conv2b name = "2b" encoder.add(conv_block(name=name, filts=256, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) ### conv2c name = "2c" encoder.add(conv_block(name=name, filts=256, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) # max pooling encoder.add( layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding="valid", name="mp2") ) # dropout encoder.add(layers.Dropout(dropout_rate, name="drop2")) ### conv3a name = "3a" encoder.add(conv_block(name=name, filts=512, kernel_size=(3, 3), padding="valid")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) ### conv3b name = "3b" encoder.add(conv_block(name=name, filts=256, kernel_size=(1, 1), padding="valid")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) ### conv3c name = "3c" encoder.add(conv_block(name=name, filts=128, kernel_size=(1, 1), padding="valid")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) # max pooling encoder.add(layers.AveragePooling2D(pool_size=(3, 3), strides=2, padding="valid")) encoder.add(layers.Flatten()) encoder.add(layers.Dense(256, activation=None, name="z")) classifier = models.Sequential() classifier.add(tf.keras.Input(shape=(256))) classifier.add(WeightNormalization(layers.Dense(256, activation=None))) classifier.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelufc1")) classifier.add(WeightNormalization(layers.Dense(256, activation=None))) classifier.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelufc2")) classifier.add( WeightNormalization(layers.Dense(num_classes, activation=None), name="y_") ) embedder = models.Sequential() embedder.add(tf.keras.Input(shape=(256))) if extend_embedder: embedder.add(WeightNormalization(layers.Dense(256, activation=None))) embedder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelufc1")) embedder.add(WeightNormalization(layers.Dense(256, activation=None))) embedder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelufc2")) embedder.add( WeightNormalization(layers.Dense(n_latent_dims, activation=None), name="z_") ) return encoder, classifier, embedder def load_cifar10_CNN13( n_latent_dims, extend_embedder=True, dims=(32, 32, 3), num_classes=10, lr_alpha=0.1, dropout_rate=0.5, ): """ references for network: - https://github.com/benathi/fastswa-semi-sup/blob/master/mean_teacher/architectures.py - https://github.com/vikasverma1077/ICT/blob/master/networks/lenet.py - https://github.com/brain-research/realistic-ssl-evaluation """ def conv_block(filts, name, kernel_size=(3, 3), padding="same", **kwargs): return WeightNormalization( layers.Conv2D( filts, kernel_size, 
activation=None, padding=padding, **kwargs ), name="conv" + name, ) encoder = models.Sequential() encoder.add(tf.keras.Input(shape=dims)) ### conv1a name = "1a" encoder.add(conv_block(name=name, filts=128, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) ### conv1b name = "1b" encoder.add(conv_block(name=name, filts=128, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) ### conv1c name = "1c" encoder.add(conv_block(name=name, filts=128, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) # max pooling encoder.add( layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding="valid", name="mp1") ) # dropout encoder.add(layers.Dropout(dropout_rate, name="drop1")) ### conv2a name = "2a" encoder.add(conv_block(name=name, filts=256, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha)) ### conv2b name = "2b" encoder.add(conv_block(name=name, filts=256, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) ### conv2c name = "2c" encoder.add(conv_block(name=name, filts=256, kernel_size=(3, 3), padding="same")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) # max pooling encoder.add( layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding="valid", name="mp2") ) # dropout encoder.add(layers.Dropout(dropout_rate, name="drop2")) ### conv3a name = "3a" encoder.add(conv_block(name=name, filts=512, kernel_size=(3, 3), padding="valid")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) ### conv3b name = "3b" encoder.add(conv_block(name=name, filts=256, kernel_size=(1, 1), padding="valid")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) ### conv3c name = "3c" encoder.add(conv_block(name=name, filts=128, kernel_size=(1, 1), padding="valid")) encoder.add(layers.BatchNormalization(name="bn" + name)) encoder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelu" + name)) # max pooling encoder.add(layers.AveragePooling2D(pool_size=(6, 6), strides=2, padding="valid")) encoder.add(layers.Flatten()) encoder.add(layers.Dense(256, activation=None, name="z")) classifier = models.Sequential() classifier.add(tf.keras.Input(shape=(256))) classifier.add(WeightNormalization(layers.Dense(256, activation=None))) classifier.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelufc1")) classifier.add(WeightNormalization(layers.Dense(256, activation=None))) classifier.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelufc2")) classifier.add( WeightNormalization(layers.Dense(num_classes, activation=None), name="y_") ) embedder = models.Sequential() embedder.add(tf.keras.Input(shape=(256))) if extend_embedder: embedder.add(WeightNormalization(layers.Dense(256, activation=None))) embedder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelufc1")) embedder.add(WeightNormalization(layers.Dense(256, activation=None))) embedder.add(layers.LeakyReLU(alpha=lr_alpha, name="lrelufc2")) embedder.add( 
WeightNormalization(layers.Dense(n_latent_dims, activation=None), name="z_") ) return encoder, classifier, embedder from scipy import optimize def find_a_b(min_dist=0.1): """ determine optimal params a, b such that distances less than min_dist keep a membership probability of roughly one (i.e. they are not pulled any closer) """ # input distances x = np.linspace(0, 3, 300) # optimal output (if close enough, don't try to make closer) y = np.exp(-x +
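# --------------------------------------------------------------------------
# Hedged sketch: find_a_b above is truncated, but the usual way to obtain the
# UMAP output-space parameters a and b is to least-squares fit the smooth curve
# 1 / (1 + a * x**(2b)) to a target that is ~1 below min_dist and decays
# exponentially above it.  This is an assumption about the intended body, shown
# as a standalone function; umap_ab_from_min_dist is my own name.
import numpy as np
from scipy import optimize


def umap_ab_from_min_dist(min_dist=0.1, spread=1.0):
    """Fit a, b so that 1/(1 + a*x**(2b)) approximates the min_dist target curve."""

    def curve(x, a, b):
        return 1.0 / (1.0 + a * x ** (2.0 * b))

    x = np.linspace(0.0, spread * 3.0, 300)
    y = np.ones_like(x)
    mask = x > min_dist
    y[mask] = np.exp(-(x[mask] - min_dist) / spread)
    (a, b), _ = optimize.curve_fit(curve, x, y)
    return a, b


# example: a, b = umap_ab_from_min_dist(0.1)   # roughly a ~ 1.58, b ~ 0.90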
self.read_reg(self.REG_AXES_ENABLE, 1) if modes == self.INTERRUPUT_LATCH_DISABLE: self.__txbuf[0] = rslt[0] & 0xFD else: self.__txbuf[0] = rslt[0] | 0x02 self.write_reg(self.REG_AXES_ENABLE, self.__txbuf) def set_threshold_interrupt(self, mode, threshold, polarity, channel_x = INTERRUPT_X_ENABLE, channel_y = INTERRUPT_Y_ENABLE, channel_z = INTERRUPT_Z_ENABLE): '''! @brief Set threshold interrupt, an interrupt is triggered when the geomagnetic value of a channel is beyond/below the threshold @n High polarity: active on high, the default is low level, which turns to high level when the interrupt is triggered. @n Low polarity: active on low, the default is high level, which turns to low level when the interrupt is triggered. @param mode @n LOW_THRESHOLD_INTERRUPT Low threshold interrupt mode @n HIGH_THRESHOLD_INTERRUPT High threshold interrupt mode @param threshold @n Threshold, default to expand 16 times, for example: under low threshold mode, if the threshold is set to be 1, actually the geomagnetic data below 16 will trigger an interrupt @param polarity @n POLARITY_HIGH High polarity @n POLARITY_LOW Low polarity @param channel_x @n INTERRUPT_X_ENABLE Enable low threshold interrupt at x-axis @n INTERRUPT_X_DISABLE Disable low threshold interrupt at x-axis @param channel_y @n INTERRUPT_Y_ENABLE Enable low threshold interrupt at y-axis @n INTERRUPT_Y_DISABLE Disable low threshold interrupt at y-axis @param channel_z @n INTERRUPT_Z_ENABLE Enable low threshold interrupt at z-axis @n INTERRUPT_Z_DISABLE Disable low threshold interrupt at z-axis ''' if mode == self.LOW_THRESHOLD_INTERRUPT: self.__threshold_mode = self.LOW_THRESHOLD_INTERRUPT self.set_low_threshold_interrupt(channel_x, channel_y, channel_z, threshold, polarity) else: self.__threshold_mode = self.HIGH_THRESHOLD_INTERRUPT self.set_high_threshold_interrupt(channel_x, channel_y, channel_z, threshold, polarity) def get_threshold_interrupt_data(self): '''! @brief Get the data that threshold interrupt occured @return Return the list for storing geomagnetic data, how the data at 3 axis influence interrupt status, @n [0] The data triggering threshold at x-axis, when the data is NO_DATA, the interrupt is triggered. @n [1] The data triggering threshold at y-axis, when the data is NO_DATA, the interrupt is triggered. @n [2] The data triggering threshold at z-axis, when the data is NO_DATA, the interrupt is triggered. 
@n [3] The character string storing the trigger threshold interrupt status @n [4] The binary data format of storing threshold interrupt status are as follows @n bit0 is 1 indicate threshold interrupt is triggered at x-axis @n bit1 is 1 indicate threshold interrupt is triggered at y-axis @n bit2 is 1 indicate threshold interrupt is triggered at z-axis @n ------------------------------------ @n | bit7 ~ bit3 | bit2 | bit1 | bit0 | @n ------------------------------------ @n | reserved | 0 | 0 | 0 | @n ------------------------------------ ''' data = [0]*10 str1 = "" if self.__threshold_mode == self.LOW_THRESHOLD_INTERRUPT: state = self.get_low_threshold_interrupt_state() else: state = self.get_high_threshold_interrupt_state() rslt = self.get_geomagnetic() if (state>>0)&0x01: data[0] = rslt[0] str1 += "X " else: data[0] = self.NO_DATA if (state>>1)&0x01: data[1] = rslt[1] str1 += "Y " else: data[1] = self.NO_DATA if (state>>2)&0x01: data[2] = rslt[2] str1 += "Z " else: data[2] = self.NO_DATA if state != 0: str1 += " threshold interrupt" data[3] = str1 data[4] = state&0x07 return data def set_low_threshold_interrupt(self, channel_x, channel_y, channel_z, low_threshold, polarity): '''! @brief Set low threshold interrupt, an interrupt is triggered when the geomagnetic value of a channel is below the low threshold @n High polarity: active on high, the default is low level, which turns to high level when the interrupt is triggered. @n Low polarity: active on low, the default is high level, which turns to low level when the interrupt is triggered. @param channel_x @n INTERRUPT_X_ENABLE Enable low threshold interrupt at x-axis @n INTERRUPT_X_DISABLE Disable low threshold interrupt at x-axis @param channel_y @n INTERRUPT_Y_ENABLE Enable low threshold interrupt at y-axis @n INTERRUPT_Y_DISABLE Disable low threshold interrupt at y-axis @param channel_z @n INTERRUPT_Z_ENABLE Enable low threshold interrupt at z-axis @n INTERRUPT_Z_DISABLE Disable low threshold interrupt at z-axis @param low_threshold Low threshold, default to expand 16 times, for example: if the threshold is set to be 1, actually the geomagnetic data below 16 will trigger an interrupt @param polarity @n POLARITY_HIGH High polarity @n POLARITY_LOW Low polarity ''' if low_threshold < 0: self.__txbuf[0] = (low_threshold*-1) | 0x80 else: self.__txbuf[0] = low_threshold self.write_reg(self.REG_LOW_THRESHOLD ,self.__txbuf) rslt = self.read_reg(self.REG_INT_CONFIG, 1) if channel_x == self.INTERRUPT_X_DISABLE: self.__txbuf[0] = rslt[0] | 0x01 else: self.__txbuf[0] = rslt[0] & 0xFE if channel_y == self.INTERRUPT_Y_DISABLE: self.__txbuf[0] = self.__txbuf[0] | 0x02 else: self.__txbuf[0] = self.__txbuf[0] & 0xFC if channel_x == self.INTERRUPT_X_DISABLE: self.__txbuf[0] = self.__txbuf[0] | 0x04 else: self.__txbuf[0] = self.__txbuf[0] & 0xFB self.write_reg(self.REG_INT_CONFIG ,self.__txbuf) self.set_interrupt_pin(self.ENABLE_INTERRUPT_PIN, polarity) def get_low_threshold_interrupt_state(self): '''! 
@brief Get the status of low threshold interrupt, which axis triggered the low threshold interrupt @return status The returned number indicate the low threshold interrupt occur at which axis @n bit0 is 1 indicate the interrupt occur at x-axis @n bit1 is 1 indicate the interrupt occur at y-axis @n bit2 is 1 indicate the interrupt occur at z-axis @n ------------------------------------ @n | bit7 ~ bit3 | bit2 | bit1 | bit0 | @n ------------------------------------ @n | reserved | 0 | 0 | 0 | @n ------------------------------------ ''' rslt = self.read_reg(self.REG_INTERRUPT_STATUS, 1) return rslt[0]&0x07 def set_high_threshold_interrupt(self, channel_x, channel_y, channel_z, high_threshold, polarity): '''! @brief Set high threshold interrupt, an interrupt is triggered when the geomagnetic value of a channel is beyond the threshold, the threshold is default to expand 16 times @n There will be level change when INT pin interrupt occurred @n High pin polarity: active on high, the default is low level, which will jump when the threshold is triggered. @n Low pin polarity: active on low, the default is high level, which will jump when the threshold is triggered. @param channel_x @n INTERRUPT_X_ENABLE Enable high threshold interrupt at x-axis @n INTERRUPT_X_DISABLE Disable high threshold interrupt at x-axis @param channel_y @n INTERRUPT_Y_ENABLE Enable high threshold interrupt at y-axis @n INTERRUPT_Y_DISABLE Disable high threshold interrupt at y-axis @param channel_z @n INTERRUPT_Z_ENABLE Enable high threshold interrupt at z-axis @n INTERRUPT_Z_DISABLE Disable high threshold interrupt at z-axis @param high_threshold High threshold, default to expand 16 times, for example: if the threshold is set to be 1, actually the geomagnetic data beyond 16 will trigger an interrupt @param polarity @n POLARITY_HIGH High polarity @n POLARITY_LOW Low polarity ''' if high_threshold < 0: self.__txbuf[0] = (high_threshold*-1) | 0x80 else: self.__txbuf[0] = high_threshold self.write_reg(self.REG_HIGH_THRESHOLD, self.__txbuf) rslt = self.read_reg(self.REG_INT_CONFIG, 1) if channel_x == self.HIGH_INTERRUPT_X_DISABLE: self.__txbuf[0] = rslt[0] | 0x08 else: self.__txbuf[0] = rslt[0] & 0xF7 if channel_y == self.HIGH_INTERRUPT_Y_DISABLE: self.__txbuf[0] = self.__txbuf[0] | 0x10 else: self.__txbuf[0] = self.__txbuf[0] & 0xEF if channel_x == self.HIGH_INTERRUPT_X_DISABLE: self.__txbuf[0] = self.__txbuf[0] | 0x20 else: self.__txbuf[0] = self.__txbuf[0] & 0xDf self.write_reg(self.REG_INT_CONFIG ,self.__txbuf) self.set_interrupt_pin(self.ENABLE_INTERRUPT_PIN, polarity) def get_high_threshold_interrupt_state(self): '''! @brief Get the status of high threshold interrupt, which axis triggered the high threshold interrupt @return status The returned number indicate the high threshold interrupt occur at which axis @n bit0 is 1 indicate the interrupt occur at x-axis @n bit1 is 1 indicate the interrupt occur at y-axis @n bit2 is 1 indicate the interrupt occur at z-axis @n ------------------------------------ @n | bit7 ~ bit3 | bit2 | bit1 | bit0 | @n ------------------------------------ @n | reserved | 0 | 0 | 0 | @n ------------------------------------ ''' rslt = self.read_reg(self.REG_INTERRUPT_STATUS, 1) return (rslt[0]&0x38)>>3 class DFRobot_bmm150_I2C(DFRobot_bmm150): '''! @brief An example of an i2c interface module ''' def __init__(self, bus, addr): self.__addr = addr super(DFRobot_bmm150_I2C, self).__init__(bus) def write_reg(self, reg, data): '''! 
@brief writes data to a register @param reg register address @param data written data ''' while 1: try: self.i2cbus.write_i2c_block_data(self.__addr, reg, data) return except: print("please check connect!") #os.system('i2cdetect -y 1') time.sleep(1) return def read_reg(self, reg ,len): '''! @brief read the data from the register @param reg register address @param len read data length ''' while 1: try: rslt = self.i2cbus.read_i2c_block_data(self.__addr, reg, len) #print rslt return rslt except: time.sleep(1) print("please check connect!") class DFRobot_bmm150_SPI(DFRobot_bmm150): def __init__(self, cs, bus = 0, dev = 0, speed = 1000000): self.__cs = cs GPIO.setup(self.__cs, GPIO.OUT) GPIO.output(self.__cs, GPIO.LOW) self.__spi = spidev.SpiDev() self.__spi.open(bus, dev) self.__spi.no_cs = True self.__spi.max_speed_hz =
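# --------------------------------------------------------------------------
# Hedged sketch: the register arithmetic in the threshold-interrupt methods
# above packs the threshold as a sign-magnitude byte (bit 7 carries the sign)
# and uses one "disable" bit per axis in the interrupt-config register (low
# threshold: bits 0-2, high threshold: bits 3-5, as implied by the masks used
# above).  The helpers below restate that encoding in isolation; the names and
# the per-axis loop are my own illustration, not the driver's API.

def threshold_to_byte(threshold):
    """Sign-magnitude encoding: bit 7 carries the sign, bits 0-6 the magnitude."""
    if threshold < 0:
        return (-threshold) | 0x80
    return threshold & 0x7F


def apply_axis_disable_bits(config, disable_x, disable_y, disable_z, base_bit=0):
    """Set or clear one disable bit per axis, starting at base_bit (0 = low, 3 = high)."""
    for offset, disabled in enumerate((disable_x, disable_y, disable_z)):
        bit = 1 << (base_bit + offset)
        if disabled:
            config |= bit               # a set bit switches that axis' interrupt off
        else:
            config &= ~bit & 0xFF       # a cleared bit leaves it enabled
    return config


# example: disable only the z-axis low-threshold interrupt in a zeroed config byte
# new_config = apply_axis_disable_bits(0x00, False, False, True)   # -> 0x04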
activity_number = request.POST.get('activity_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') intra_community_vat_number = request.POST.get('intra_community_vat_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') president = request.POST.get('president').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') registration_date = request.POST.get('registration_date').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c1 = request.POST.get('r1c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c2 = request.POST.get('r1c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c3 = request.POST.get('r1c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c1 = request.POST.get('r2c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c2 = request.POST.get('r2c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c3 = request.POST.get('r2c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c1 = request.POST.get('r3c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c2 = request.POST.get('r3c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c3 = request.POST.get('r3c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r4c1 = request.POST.get('r4c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r4c2 = request.POST.get('r4c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r4c3 = request.POST.get('r4c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r5c1 = request.POST.get('r5c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r5c2 = request.POST.get('r5c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r5c3 = request.POST.get('r5c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r6c1 = request.POST.get('r6c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r6c2 = request.POST.get('r6c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r6c3 = request.POST.get('r6c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r7c1 = request.POST.get('r7c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r7c2 = request.POST.get('r7c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r7c3 = request.POST.get('r7c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r8c1 = request.POST.get('r8c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r8c2 = request.POST.get('r8c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r8c3 = request.POST.get('r8c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r9c1 = request.POST.get('r9c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r9c2 = request.POST.get('r9c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r9c3 = request.POST.get('r9c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r10c1 = request.POST.get('r10c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r10c2 = request.POST.get('r10c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r10c3 = request.POST.get('r10c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r11c1 = request.POST.get('r11c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r11c2 = request.POST.get('r11c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r11c3 = request.POST.get('r11c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r12c1 = request.POST.get('r12c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r12c2 = request.POST.get('r12c2').replace('\t', ' 
').replace('\n', ' ').replace('\r', ' ') r12c3 = request.POST.get('r12c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r13c1 = request.POST.get('r13c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r13c2 = request.POST.get('r13c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r13c3 = request.POST.get('r13c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r14c1 = request.POST.get('r14c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r14c2 = request.POST.get('r14c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r14c3 = request.POST.get('r14c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r15c1 = request.POST.get('r15c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r15c2 = request.POST.get('r15c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r15c3 = request.POST.get('r15c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r16c1 = request.POST.get('r16c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r16c2 = request.POST.get('r16c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r16c3 = request.POST.get('r16c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r17c1 = request.POST.get('r17c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r17c2 = request.POST.get('r17c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r17c3 = request.POST.get('r17c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r18c1 = request.POST.get('r18c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r18c2 = request.POST.get('r18c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r18c3 = request.POST.get('r18c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r19c1 = request.POST.get('r19c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r19c2 = request.POST.get('r19c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r19c3 = request.POST.get('r19c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r20c1 = request.POST.get('r20c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r20c2 = request.POST.get('r20c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r20c3 = request.POST.get('r20c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r21c1 = request.POST.get('r21c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r21c2 = request.POST.get('r21c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r21c3 = request.POST.get('r21c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r22c1 = request.POST.get('r22c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r22c2 = request.POST.get('r22c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r22c3 = request.POST.get('r22c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r23c1 = request.POST.get('r23c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r23c2 = request.POST.get('r23c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r23c3 = request.POST.get('r23c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r24c1 = request.POST.get('r24c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r24c2 = request.POST.get('r24c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r24c3 = request.POST.get('r24c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r25c1 = request.POST.get('r25c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r25c2 = request.POST.get('r25c2').replace('\t', ' 
').replace('\n', ' ').replace('\r', ' ') r25c3 = request.POST.get('r25c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r26c1 = request.POST.get('r26c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r26c2 = request.POST.get('r26c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r26c3 = request.POST.get('r26c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r27c1 = request.POST.get('r27c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r27c2 = request.POST.get('r27c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r27c3 = request.POST.get('r27c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r28c1 = request.POST.get('r28c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r28c2 = request.POST.get('r28c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r28c3 = request.POST.get('r28c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r29c1 = request.POST.get('r29c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r29c2 = request.POST.get('r29c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r29c3 = request.POST.get('r29c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r30c1 = request.POST.get('r30c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r30c2 = request.POST.get('r30c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r30c3 = request.POST.get('r30c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') body = '<!doctype html>' + \ '<html lang="en">' + \ '<head>' + \ '<meta charset="utf-8">' + \ '<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">' + \ '<link rel="stylesheet"' + \ 'href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css"' + \ 'integrity="<KEY>"' + \ 'crossorigin="anonymous">' + \ '<title>Operational budget</title>' + \ '</head>' + \ '<body>' + \ '<div class="container">' + \ '<div class="card text-center">' + \ '<div class="card-header text-center">Operational budget</div>' + \ '<div class="card-body">' body += '<h6>Comapny name : ' + company_name + '</h6>' + \ '<h6>Share capital : ' + share_capital + '</h6>' + \ '<h6>Head office address : ' + head_office_address + '</h6>' + \ '<h6>Establishment number : ' + establishment_number + '</h6>' + \ '<h6>Register of Trade and Companies : ' + register_of_trade_and_companies + '</h6>' + \ '<h6>Main activities : ' + main_activities + '</h6>' + \ '<h6>Activity number : ' + activity_number + '</h6>' + \ '<h6>Intra-community VAT number : ' + intra_community_vat_number + '</h6>' + \ '<h6>President : ' + president + '</h6>' + \ '<h6>Registration date : ' + registration_date + '</h6>' + \ '<br>' body += '<br>' body += '<table class="table table-striped table-bordered">' + \ '<thead>' + \ '<tr>' + \ '<th scope="col">Details</th>' + \ '<th scope="col">Estimate</th>' + \ '<th scope="col">Actual</th>' + \ '<th scope="col">Variance</th>' + \ '</tr>' + \ '</thead>' + \ '<tbody>' + \ '<tr>' + \ '<td>Sales revenue</td>' + \ '<td>' + r1c1 + '</td>' + \ '<td>' + r1c2 + '</td>' + \ '<td>' + r1c3 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Grants revenue</td>' + \ '<td>' + r2c1 + '</td>' + \ '<td>' + r2c2 + '</td>' + \ '<td>' + r2c3 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Donations revenue</td>' + \ '<td>' + r3c1 + '</td>' + \ '<td>' + r3c2 + '</td>' + \ '<td>' + r3c3 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Gifts revenue</td>' + \ '<td>' + r4c1 + '</td>' + \ '<td>' + r4c2 + '</td>' + \ '<td>' + r4c3 + '</td>' + \ '</tr>' 
+ \ '<tr>' + \ '<td>Interest revenue</td>' + \ '<td>' + r5c1 + '</td>' + \ '<td>' + r5c2 + '</td>' + \ '<td>' + r5c3 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Reimbursement revenue</td>' + \ '<td>' + r6c1 + '</td>' + \ '<td>' + r6c2 + '</td>' + \ '<td>' + r6c3 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Rent revenue</td>' + \ '<td>' + r7c1 + '</td>' + \ '<td>' + r7c2 + '</td>' + \ '<td>' + r7c3 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Investment
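# --------------------------------------------------------------------------
# Hedged sketch: the rXcY fields above are all read and sanitised with the same
# chain of .replace() calls.  A loop over the row/column indices keeps the view
# equivalent but much shorter; clean_field, collect_grid and the 30x3 grid size
# are my own illustration, inferred from the field names used above.

def clean_field(value):
    """Collapse tabs and newlines to single spaces, tolerating missing keys."""
    return (value or '').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')


def collect_grid(post, n_rows=30, n_cols=3):
    """Return {'r1c1': ..., ...} for every rXcY field in the POST data."""
    return {
        'r%dc%d' % (row, col): clean_field(post.get('r%dc%d' % (row, col)))
        for row in range(1, n_rows + 1)
        for col in range(1, n_cols + 1)
    }


# example usage inside the view:
# cells = collect_grid(request.POST)
# body += '<td>' + cells['r1c1'] + '</td>'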
<reponame>guillaume-florent/PyGeM """ Utilities for reading and writing parameters files to perform FFD geometrical morphing. """ try: import configparser as configparser except ImportError: import ConfigParser as configparser import os import numpy as np from OCC.Bnd import Bnd_Box from OCC.BRepBndLib import brepbndlib_Add from OCC.BRepMesh import BRepMesh_IncrementalMesh import vtk import pygem.affine as at class FFDParameters(object): """ Class that handles the Free Form Deformation parameters in terms of FFD bounding box and weight of the FFD control points. :param list n_control_points: number of control points in the x, y, and z direction. If not provided it is set to [2, 2, 2]. :cvar numpy.ndarray length_box: dimension of the FFD bounding box, in the x, y and z direction (local coordinate system). :cvar numpy.ndarray origin_box: the x, y and z coordinates of the origin of the FFD bounding box. :cvar numpy.ndarray rot_angle: rotation angle around x, y and z axis of the FFD bounding box. :cvar numpy.ndarray n_control_points: the number of control points in the x, y, and z direction. :cvar numpy.ndarray array_mu_x: collects the displacements (weights) along x, normalized with the box lenght x. :cvar numpy.ndarray array_mu_y: collects the displacements (weights) along y, normalized with the box lenght y. :cvar numpy.ndarray array_mu_z: collects the displacements (weights) along z, normalized with the box lenght z. :Example: from file >>> import pygem.params as ffdp >>> >>> # Reading an existing file >>> params1 = ffdp.FFDParameters() >>> params1.read_parameters( >>> filename='tests/test_datasets/parameters_test_ffd_identity.prm') >>> >>> # Creating a default parameters file with the right dimensions (if the >>> # file does not exists it is created with that name). So it is possible >>> # to manually edit it and read it again. >>> params2 = ffdp.FFDParameters(n_control_points=[2, 3, 2]) >>> params2.read_parameters(filename='parameters_test.prm') >>> >>> # Creating bounding box of the given shape >>> from OCC.IGESControl import IGESControl_Reader >>> params3 = ffdp.FFDParameters() >>> reader = IGESControl_Reader() >>> reader.ReadFile('tests/test_datasets/test_pipe.igs') >>> reader.TransferRoots() >>> shape = reader.Shape() >>> params3.build_bounding_box(shape) .. note:: Four vertex (non coplanar) are sufficient to uniquely identify a parallelepiped. If the four vertex are coplanar, an assert is thrown when affine_points_fit is used. """ def __init__(self, n_control_points=None): self.conversion_unit = 1. self.lenght_box = np.array([1., 1., 1.]) self.origin_box = np.array([0., 0., 0.]) self.rot_angle = np.array([0., 0., 0.]) if n_control_points is None: n_control_points = [2, 2, 2] self.n_control_points = np.array(n_control_points) self.array_mu_x = np.zeros(self.n_control_points) self.array_mu_y = np.zeros(self.n_control_points) self.array_mu_z = np.zeros(self.n_control_points) @property def psi_mapping(self): """ Map from the physical domain to the reference domain. :rtype: numpy.ndarray """ return np.diag(np.reciprocal(self.lenght_box)) @property def inv_psi_mapping(self): """ Map from the reference domain to the physical domain. :rtype: numpy.ndarray """ return np.diag(self.lenght_box) @property def rotation_matrix(self): """ The rotation matrix (according to rot_angle_x, rot_angle_y, rot_angle_z). 
:rtype: numpy.ndarray """ return at.angles2matrix( np.radians(self.rot_angle[2]), np.radians(self.rot_angle[1]), np.radians(self.rot_angle[0])) @property def position_vertices(self): """ The position of the vertices of the FFD bounding box. :rtype: numpy.ndarray """ return self.origin_box + np.vstack([ np.zeros((1, 3)), self.rotation_matrix.dot(np.diag(self.lenght_box)).T ]) def read_parameters(self, filename='parameters.prm'): """ Reads in the parameters file and fill the self structure. :param string filename: parameters file to be read in. """ if not isinstance(filename, str): raise TypeError("filename must be a string") # Checks if the parameters file exists. If not it writes the default # class into filename. if not os.path.isfile(filename): self.write_parameters(filename) return config = configparser.RawConfigParser() config.read(filename) self.n_control_points[0] = config.getint('Box info', 'n control points x') self.n_control_points[1] = config.getint('Box info', 'n control points y') self.n_control_points[2] = config.getint('Box info', 'n control points z') self.lenght_box[0] = config.getfloat('Box info', 'box lenght x') self.lenght_box[1] = config.getfloat('Box info', 'box lenght y') self.lenght_box[2] = config.getfloat('Box info', 'box lenght z') self.origin_box[0] = config.getfloat('Box info', 'box origin x') self.origin_box[1] = config.getfloat('Box info', 'box origin y') self.origin_box[2] = config.getfloat('Box info', 'box origin z') self.rot_angle[0] = config.getfloat('Box info', 'rotation angle x') self.rot_angle[1] = config.getfloat('Box info', 'rotation angle y') self.rot_angle[2] = config.getfloat('Box info', 'rotation angle z') self.array_mu_x = np.zeros(self.n_control_points) self.array_mu_y = np.zeros(self.n_control_points) self.array_mu_z = np.zeros(self.n_control_points) mux = config.get('Parameters weights', 'parameter x') muy = config.get('Parameters weights', 'parameter y') muz = config.get('Parameters weights', 'parameter z') for line in mux.split('\n'): values = np.array(line.split()) self.array_mu_x[tuple(map(int, values[0:3]))] = float(values[3]) for line in muy.split('\n'): values = line.split() self.array_mu_y[tuple(map(int, values[0:3]))] = float(values[3]) for line in muz.split('\n'): values = line.split() self.array_mu_z[tuple(map(int, values[0:3]))] = float(values[3]) def write_parameters(self, filename='parameters.prm'): """ This method writes a parameters file (.prm) called `filename` and fills it with all the parameters class members. :param string filename: parameters file to be written out. 
""" if not isinstance(filename, str): raise TypeError("filename must be a string") output_string = "" output_string += '\n[Box info]\n' output_string += '# This section collects all the properties of the' output_string += ' FFD bounding box.\n' output_string += '\n# n control points indicates the number of control' output_string += ' points in each direction (x, y, z).\n' output_string += '# For example, to create a 2 x 3 x 2 grid, use the' output_string += ' following: n control points: 2, 3, 2\n' output_string += 'n control points x: ' + str( self.n_control_points[0]) + '\n' output_string += 'n control points y: ' + str( self.n_control_points[1]) + '\n' output_string += 'n control points z: ' + str( self.n_control_points[2]) + '\n' output_string += '\n# box lenght indicates the length of the FFD ' output_string += 'bounding box along the three canonical directions ' output_string += '(x, y, z).\n' output_string += '# It uses the local coordinate system.\n' output_string += '# For example to create a 2 x 1.5 x 3 meters box ' output_string += 'use the following: lenght box: 2.0, 1.5, 3.0\n' output_string += 'box lenght x: ' + str(self.lenght_box[0]) + '\n' output_string += 'box lenght y: ' + str(self.lenght_box[1]) + '\n' output_string += 'box lenght z: ' + str(self.lenght_box[2]) + '\n' output_string += '\n# box origin indicates the x, y, and z coordinates ' output_string += 'of the origin of the FFD bounding box. That is ' output_string += 'center of\n' output_string += '# rotation of the bounding box. It corresponds to ' output_string += 'the point coordinates with position [0][0][0].\n' output_string += '# See section "Parameters weights" for more ' output_string += 'details.\n' output_string += '# For example, if the origin is equal to 0., 0., 0., ' output_string += 'use the following: origin box: 0., 0., 0.\n' output_string += 'box origin x: ' + str(self.origin_box[0]) + '\n' output_string += 'box origin y: ' + str(self.origin_box[1]) + '\n' output_string += 'box origin z: ' + str(self.origin_box[2]) + '\n' output_string += '\n# rotation angle indicates the rotation angle ' output_string += 'around the x, y, and z axis of the FFD bounding box ' output_string += 'in degrees.\n' output_string += '# The rotation is done with respect to the box ' output_string += 'origin.\n' output_string += '# For example, to rotate the box by 2 deg along ' output_string += 'the z ' output_string += 'direction, use the following: rotation angle: ' output_string += '0., 0., 2.\n' output_string += 'rotation angle x: ' + str(self.rot_angle[0]) + '\n' output_string += 'rotation angle y: ' + str(self.rot_angle[1]) + '\n' output_string += 'rotation angle z: ' + str(self.rot_angle[2]) + '\n' output_string += '\n\n[Parameters weights]\n' output_string += '# This section describes the weights of the FFD ' output_string += 'control points.\n' output_string += '# We adopt the following convention:\n' output_string += '# For example with a 2x2x2 grid of control points we ' output_string += 'have to fill a 2x2x2 matrix of weights.\n' output_string += '# If a weight is equal to zero you can discard the ' output_string += 'line since the default is zero.\n' output_string += '#\n' output_string += '# | x index | y index | z index | weight |\n' output_string += '# --------------------------------------\n' output_string += '# | 0 | 0 | 0 | 1.0 |\n' output_string += '# | 0 | 1 | 1 | 0.0 | --> you ' output_string += 'can erase this line without effects\n' output_string += '# | 0 | 1 | 0 | -2.1 |\n' output_string += '# | 0 
| 0 | 1
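# --------------------------------------------------------------------------
# Hedged sketch: read_parameters above fills array_mu_x/y/z from lines of the
# form "<x index> <y index> <z index> <weight>", the convention documented in
# write_parameters.  The standalone helper below shows just that parsing step
# on a small 2x2x2 grid; the function name and the sample text are illustrative,
# not part of the FFDParameters class.
import numpy as np


def parse_weight_block(text, n_control_points=(2, 2, 2)):
    """Turn 'i j k w' lines into a weights array of shape n_control_points."""
    weights = np.zeros(n_control_points)
    for line in text.split('\n'):
        values = line.split()
        if len(values) != 4:
            continue  # skip blank or malformed lines
        weights[tuple(map(int, values[0:3]))] = float(values[3])
    return weights


# example matching the documented convention:
# parse_weight_block("0 0 0 1.0\n0 1 0 -2.1")[0, 1, 0]   ->  -2.1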
= pd.DataFrame(con_df[con_df.columns[2]]).astype('float') con_df.columns = [3] syn_df = pd.DataFrame(syn_df[syn_df.columns[2]]).astype('float') syn_df.columns = [4] df = pd.concat([head_df, wei_df, con_df, syn_df],axis=1) return df def save(save_to=None): if not save_to: save_to = filename.get() df = get_whole_df() (nr,nc) = df.shape tb = df.to_csv(sep=' ',header=False,index=False,float_format='%.6f') file = open(save_to,"w") file.write(str(nr)+'\n') file.write(tb) file.close() display_app_status('Connections Data file \"'+filename.get()+'\" saved') return def new(): if synaptic_weight_page_obj.has_changed() or convergence_page_obj.has_changed() or synapses_page_obj.has_changed(): result = messagebox.askquestion("New", "Are you sure? Data has been changed.", icon='warning') if result != 'yes': return d = DialogEntryBox(root,text="New File Name:",lefttext=os.path.join(dataset_folder, conndata_file_prefix),righttext=conndata_file_postfix) root.wait_window(d.top) if d.confirm==False: return #get all as presynaptic #get all not artificial as postsynaptic newfilename = os.path.join(dataset_folder,conndata_file_prefix+ d.value.get() + conndata_file_postfix) loaded_cellnums = get_public_param("loaded_cellnums") column_names = ["Friendly Cell Name", "Cell File Name", "Num Cells", "Layer Index","Artificial:1 Real:0"] cellnums_pd = pd.read_csv(loaded_cellnums ,delimiter=' ',\ skiprows=1,header=None,\ names = column_names) cellnums_pd[column_names[4]] = cellnums_pd[column_names[4]].astype(int) pre = cellnums_pd[cellnums_pd.columns[0]].values.tolist() (pre_nr,pre_nc) = pd.DataFrame(pre).shape post = cellnums_pd.loc[cellnums_pd[column_names[4]] == 0] post = post[post.columns[0]] post = pd.DataFrame(post) (post_nr,post_nc) = post.shape post = np.repeat(post[post.columns[0]],pre_nr).reset_index(drop=True) pre = pd.DataFrame(pre*post_nr) df = pd.concat([pre,post],axis=1) df[2] = '0.0' df[3] = '0' df[4] = '0' tb = df.to_csv(sep=' ',header=False,index=False,float_format='%.6f') file = open(newfilename,"w") file.write(str(pre_nr*post_nr)+'\n') file.write(tb) file.close() load(load_from=newfilename) reload_files_and_set(newfilename) #create a presynaptic*postsynaptic by 5 pandas dataframe #set all values to zero #set first column #set second column display_app_status('Connections Data file \"'+filename.get()+'\" created') return def new_clone_current(): if synaptic_weight_page_obj.has_changed() or convergence_page_obj.has_changed() or synapses_page_obj.has_changed(): result = messagebox.askquestion("New", "Are you sure? 
Data has been changed.", icon='warning') if result != 'yes': return d = DialogEntryBox(root,text="New File Name:",lefttext=os.path.join(dataset_folder, conndata_file_prefix),righttext=conndata_file_postfix) root.wait_window(d.top) if d.confirm==False: return newfilename = os.path.join(dataset_folder,conndata_file_prefix+ d.value.get() + conndata_file_postfix) f = open(newfilename,"w+") f.close() save(save_to=newfilename) reload_files_and_set() display_app_status('Connections Data file \"'+filename.get()+'\" was created') return def set_conndata_param(): fn = filename.get() search = conndata_file_prefix+'(.+?)'+conndata_file_postfix m = re.search(search,fn) if m: fn = m.group(1) set_public_param("ConnData", fn) display_app_status('ConnData parameter set to \"'+ filename.get() +'\" in current parameters file') return def reload_files_and_set(newfilename): m = fileMenu.children['menu'] m.delete(0,tk.END) newvalues = options newvalues.append(newfilename) for val in newvalues: m.add_command(label=val,command=lambda v=filename,l=val:v.set(l)) filename.set(newfilename) def delete_current_file(): return def load_conndata_param(): conndat = get_public_param("ConnData") conndat = os.path.join(dataset_folder, conndata_file_prefix + conndat + conndata_file_postfix) #filename.set('') filename.set(conndat) #generate_files_available() #Create the choice option panel filename = tk.StringVar(top_option_frame) filename.trace("w",load) filename.set('') #filename.set(options[0]) newFromCellsButton = tk.Button(top_option_frame, text="Generate New from Current Cells File", command=new, width=30) newFromCellsButton.grid(column=0, row =0, padx=5, sticky='WE',columnspan=2) useButton = tk.Button(top_option_frame, text="Set as ConnData", command=set_conndata_param, width=15) useButton.grid(column=0, row =1, padx=5, sticky='W') loadButton = tk.Button(top_option_frame, text="Load ConnData", command=load_conndata_param,width=15) loadButton.grid(column=1, row =1, padx=5, sticky='W') fileMenu = tk.OptionMenu(top_option_frame, filename, *options) fileMenu.grid(column=2, row =0, padx=5, sticky='WE',columnspan=2) deleteButton = tk.Button(top_option_frame, text="Delete", command=delete_current_file) deleteButton.grid(column=4, row =0, padx=5, pady=5, sticky='W') deleteButton.config(state=tk.DISABLED) saveButton = tk.Button(top_option_frame, text="Save", command=save) saveButton.grid(column=2, row =1, padx=5,pady=5, sticky='WE') newFromCurrentButton = tk.Button(top_option_frame, text="Save As", command=new_clone_current) newFromCurrentButton.grid(column=3, row =1, padx=5, sticky='WE') def synapses_page(root): sections_list = ['dendrite_list','soma_list','apical_list','axon_list'] synapse_type_list = ['MyExp2Sid','ExpGABAab','Custom'] condition_list = ['distance(x)','y3d(x)'] synapse_column_names = ["Postsynaptic Cell", "Presynaptic Cells",\ "Synapse Type", "Postsynaptic Section Target",\ "Condition 1", "Condition 2",\ "Tau1a/modfile", "Tau2a/cust1", "ea/cust2",\ "Tau1b/cust3", "Tau2b/cust4", "eb/cust5"] mod_list = glob.glob(mods_glob) if len(mod_list) is 0: mod_list.append('') class synapses_adapter(object): def __init__(self, root): self.root = root self.pt = PandasTable(self.root, show_add_row_button=False) self.pt.pack() def read_internal(self, df): '''get whole dataframe''' return df def get_df(self): df = self.pt.get_dataframe().replace(np.nan, '', regex=True).replace('nan','',regex=True) return df def refresh(self, df): d = defaultdict(list) d[3].append(sections_list) self.pt.set_dataframe(self.read_internal(df), 
show_delete_row=True,\ show_header=True, show_numbering=True, \ first_column_is_id=False, immutable_values=["nan"],\ options_dict=d) self.pt.pack() def add_row(self, row): row = np.array(row).reshape(-1,len(row)) r = pd.DataFrame(row,columns=synapse_column_names) for i, row in r.iterrows(): self.pt.add_row(row) return def has_changed(self): return self.pt.has_changed() class SynapseEntryBox: def __init__(self, parent, text="value", lefttext="",righttext=""): top = self.top = tk.Toplevel(parent) top.geometry('475x475') top.resizable(0,0) tk.Label(top, text='Create new synapse:\nValues from currently loaded cells file.').grid(row=0,column=0,sticky="WE",columnspan=2) core_extras = tk.Frame(top) gaba_extras = tk.Frame(top) custom_extras = tk.Frame(top) def showhide_gaba_extras(*args): if self.syntype_value.get() == synapse_type_list[1]: custom_extras.grid_forget() core_extras.grid(row=7,column=0,columnspan=2) gaba_extras.grid(row=10,column=0,columnspan=2) elif self.syntype_value.get() == synapse_type_list[2]: core_extras.grid_forget() gaba_extras.grid_forget() custom_extras.grid(row=7,column=0,columnspan=4) return else: core_extras.grid(row=7,column=0,columnspan=2) custom_extras.grid_forget() gaba_extras.grid_forget() return core_extras.grid(row=7,column=0,columnspan=2) self.pre_value = tk.StringVar(top) self.post_value = tk.StringVar(top) self.syntype_value = tk.StringVar(top) self.section_value = tk.StringVar(top) self.cond1_value = tk.StringVar(top) self.cond1_text_value = tk.StringVar(top) self.cond2_value = tk.StringVar(top) self.cond2_text_value = tk.StringVar(top) self.tau1a_value = tk.StringVar(top) self.tau2a_value = tk.StringVar(top) self.ea_value = tk.StringVar(top) self.tau1b_value = tk.StringVar(top) self.tau2b_value = tk.StringVar(top) self.eb_value = tk.StringVar(top) self.custom_mod_value = tk.StringVar(top) self.custom1_value = tk.StringVar(top) self.custom2_value = tk.StringVar(top) self.custom3_value = tk.StringVar(top) self.custom4_value = tk.StringVar(top) self.custom5_value = tk.StringVar(top) self.custom6_value = tk.StringVar(top) self.custom1_value2 = tk.StringVar(top) self.custom2_value2 = tk.StringVar(top) self.custom3_value2 = tk.StringVar(top) self.custom4_value2 = tk.StringVar(top) self.custom5_value2 = tk.StringVar(top) self.custom6_value2 = tk.StringVar(top) self.confirm = False #Inputs loaded_cellnums = get_public_param("loaded_cellnums") column_names = ["Friendly Cell Name", "Cell File Name", "Num Cells", "Layer Index","Artificial:1 Real:0"] cellnums_pd = pd.read_csv(loaded_cellnums ,delimiter=' ',\ skiprows=1,header=None,\ names = column_names) cellnums_pd[column_names[4]] = cellnums_pd[column_names[4]].astype(int) pre_options = cellnums_pd[cellnums_pd.columns[0]].values.tolist() post_options = cellnums_pd.loc[cellnums_pd[column_names[4]] == 0] post_options = post_options[post_options.columns[0]].values.tolist() l = tk.Label(top, text='Presynaptic Cell',width=25, background='light gray') l.grid(row=1,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) #self.pre = tk.Entry(top,textvariable=self.pre_value) self.pre = tk.OptionMenu(top, self.pre_value, *pre_options) self.pre.grid(row=1,column=1) l = tk.Label(top, text='Postsynaptic Cell',width=25, background='light gray') l.grid(row=2,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) #self.post = tk.Entry(top,textvariable=self.post_value) self.post = tk.OptionMenu(top, self.post_value, *post_options) self.post.grid(row=2,column=1) l = tk.Label(top, text='Synapse Type',width=25, background='light gray') 
l.grid(row=3,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) #self.syntype = tk.Entry(top,textvariable=self.syntype_value) self.syntype = tk.OptionMenu(top, self.syntype_value, *synapse_type_list) self.syntype_value.trace("w",showhide_gaba_extras) self.syntype_value.set(synapse_type_list[0]) self.syntype.grid(row=3,column=1) l = tk.Label(top, text='Postsynaptic Section Target',width=25, background='light gray') l.grid(row=4,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) #self.section = tk.Entry(top,textvariable=self.section_value) self.section = tk.OptionMenu(top, self.section_value, *sections_list) self.section.grid(row=4,column=1) l = tk.Label(top, text='Condition 1',width=25, background='light gray') l.grid(row=5,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) #self.cond1 = tk.Entry(top,textvariable=self.cond1_value) self.cond1 = tk.OptionMenu(top, self.cond1_value, *condition_list) self.cond1_value.set(condition_list[0]) self.cond1.grid(row=5,column=1) tk.Label(top, text=' > ').grid(row=5, column=2) self.cond1_text = tk.Entry(top, textvariable=self.cond1_text_value) self.cond1_text_value.set('-1') self.cond1_text.grid(row=5, column=3) l = tk.Label(top, text='Condition 2',width=25, background='light gray') l.grid(row=6,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) #self.cond2 = tk.Entry(top,textvariable=self.cond2_value) self.cond2 = tk.OptionMenu(top, self.cond2_value, *condition_list) self.cond2_value.set(condition_list[0]) self.cond2.grid(row=6,column=1) tk.Label(top, text=' < ').grid(row=6, column=2) self.cond2_text = tk.Entry(top, textvariable=self.cond2_text_value) self.cond2_text_value.set('10000') self.cond2_text.grid(row=6, column=3) l = tk.Label(core_extras, text='Tau1a',width=25, background='light gray') l.grid(row=7,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.tau1a = tk.Entry(core_extras,textvariable=self.tau1a_value) self.tau1a_value.set('2.0') self.tau1a.grid(row=7,column=1) l = tk.Label(core_extras, text='Tau2a',width=25, background='light gray') l.grid(row=8,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.tau2a = tk.Entry(core_extras,textvariable=self.tau2a_value) self.tau2a_value.set('6.3') self.tau2a.grid(row=8,column=1) l = tk.Label(core_extras, text='ea',width=25, background='light gray') l.grid(row=9,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.ea = tk.Entry(core_extras,textvariable=self.ea_value) self.ea_value.set('0.0') self.ea.grid(row=9,column=1) #GABA EXTRAS l = tk.Label(gaba_extras, text='Tau1b',width=25, background='light gray') l.grid(row=10,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.tau1b = tk.Entry(gaba_extras,textvariable=self.tau1b_value) self.tau1b.grid(row=10,column=1) l = tk.Label(gaba_extras, text='Tau2b',width=25, background='light gray') l.grid(row=11,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.tau2b = tk.Entry(gaba_extras,textvariable=self.tau2b_value) self.tau2b.grid(row=11,column=1) l = tk.Label(gaba_extras, text='eb',width=25, background='light gray') l.grid(row=12,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.eb = tk.Entry(gaba_extras,textvariable=self.eb_value) self.eb.grid(row=12,column=1) #CUSTOM EXTRAS l = tk.Label(custom_extras, text='Synapse Mod File',width=25, background='light gray') l.grid(row=7,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.custom_mod = tk.OptionMenu(custom_extras, self.custom_mod_value, *mod_list) self.custom_mod_value.set('') self.custom_mod.grid(row=7,column=1) tk.Label(custom_extras, text=' Syn."parameter" ').grid(row=8, 
column=1) tk.Label(custom_extras, text=' : ').grid(row=8, column=2) tk.Label(custom_extras, text=' "value" ').grid(row=8, column=3) l = tk.Label(custom_extras, text='Custom Parameter 1',width=25, background='light gray') l.grid(row=9,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.custom1 = tk.Entry(custom_extras,textvariable=self.custom1_value) self.custom1.grid(row=9,column=1) tk.Label(custom_extras, text=' : ').grid(row=9, column=2) self.custom1_text = tk.Entry(custom_extras, textvariable=self.custom1_value2) self.custom1_text.grid(row=9, column=3) l = tk.Label(custom_extras, text='Custom Parameter 2',width=25, background='light gray') l.grid(row=10,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.custom2 = tk.Entry(custom_extras,textvariable=self.custom2_value) self.custom2.grid(row=10,column=1) tk.Label(custom_extras, text=' : ').grid(row=10, column=2) self.custom2_text = tk.Entry(custom_extras, textvariable=self.custom2_value2) self.custom2_text.grid(row=10, column=3) l = tk.Label(custom_extras, text='Custom Parameter 3',width=25, background='light gray') l.grid(row=11,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.custom3 = tk.Entry(custom_extras,textvariable=self.custom3_value) self.custom3.grid(row=11,column=1) tk.Label(custom_extras, text=' : ').grid(row=11, column=2) self.custom3_text = tk.Entry(custom_extras, textvariable=self.custom3_value2) self.custom3_text.grid(row=11, column=3) l = tk.Label(custom_extras, text='Custom Parameter 4',width=25, background='light gray') l.grid(row=12,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.custom4 = tk.Entry(custom_extras,textvariable=self.custom4_value) self.custom4.grid(row=12,column=1) tk.Label(custom_extras, text=' : ').grid(row=12, column=2) self.custom4_text = tk.Entry(custom_extras, textvariable=self.custom4_value2) self.custom4_text.grid(row=12, column=3) l = tk.Label(custom_extras, text='Custom Parameter 5',width=25, background='light gray') l.grid(row=13,column=0,pady=5,padx=5) l.config(relief=tk.GROOVE) self.custom5 = tk.Entry(custom_extras,textvariable=self.custom5_value) self.custom5.grid(row=13,column=1) tk.Label(custom_extras, text=' : ').grid(row=13, column=2) self.custom5_text = tk.Entry(custom_extras, textvariable=self.custom5_value2) self.custom5_text.grid(row=13, column=3) #Return button_frame = tk.Frame(top) button_frame.grid(row=20,column=0,columnspan=2) b = tk.Button(button_frame, text="Ok", command=self.ok) b.grid(pady=5, padx=5, column=0, row=0, sticky="WE") b = tk.Button(button_frame, text="Cancel", command=self.cancel) b.grid(pady=5, padx=5, column=1, row=0, sticky="WE") def verify_good(self): return True def get_values(self): if self.syntype_value.get() == "Custom": if self.custom_mod_value.get() is '': self.custom_mod_value.set('none') if self.custom1_value.get() is '' or self.custom1_value2.get() is '': self.custom1_value.set('none') self.custom1_value2.set('none') if self.custom2_value.get() is '' or self.custom2_value2.get() is '': self.custom2_value.set('none') self.custom2_value2.set('none') if self.custom3_value.get() is '' or self.custom3_value2.get() is '': self.custom3_value.set('none') self.custom3_value2.set('none') if self.custom4_value.get() is '' or self.custom4_value2.get() is '': self.custom4_value.set('none') self.custom4_value2.set('none') if self.custom5_value.get() is '' or self.custom5_value2.get() is '': self.custom5_value.set('none') self.custom5_value2.set('none') if self.custom6_value.get() is '' or self.custom6_value2.get() is '': 
self.custom6_value.set('none') self.custom6_value2.set('none')
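# --- Hedged sketch (not from the source): round-tripping a conndata-style file. ---
# The save()/new() helpers above write a space-delimited table preceded by a row-count
# line. This minimal example shows that same convention with pandas; the file name and
# the five-column layout (pre, post, weight, convergence, synapses) are assumptions made
# purely for illustration.
import pandas as pd

def write_conndata(path, df):
    # First line: number of rows; remaining lines: space-delimited values.
    body = df.to_csv(sep=' ', header=False, index=False, float_format='%.6f')
    with open(path, 'w') as f:
        f.write(str(len(df)) + '\n')
        f.write(body)

def read_conndata(path):
    # Skip the count line and assign the assumed five-column layout.
    return pd.read_csv(path, sep=' ', skiprows=1, header=None,
                       names=['pre', 'post', 'weight', 'convergence', 'synapses'])

if __name__ == '__main__':
    demo = pd.DataFrame({'pre': ['cellA'], 'post': ['cellB'],
                         'weight': [0.5], 'convergence': [10], 'synapses': [3]})
    write_conndata('demo_connections.dat', demo)
    print(read_conndata('demo_connections.dat'))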
len(daids) == 1 and daids[0] == qaids[0] flags.append(flag) if len(flags) > 0 and all(flags): raise AssertionError('No need to compute a query against itself') cfgdict, review_cfg = back.confirm_query_dialog2( species2_expanded_aids, query_msg=query_msg, query_title=query_title, cfgdict=cfgdict, review_cfg=review_cfg, ) logger.info('cfgdict = %r' % (cfgdict,)) prog_bar = back.front.prog_bar prog_bar.setVisible(True) prog_bar.setWindowTitle('Initialize query') prog_hook = prog_bar.utool_prog_hook # prog_bar = guitool.newProgressBar(None) # back.front) # Doesn't seem to work correctly # prog_hook.show_indefinite_progress() prog_hook.force_event_update() # prog_hook.set_progress(0) prog_bar.setWindowTitle('Start query') # import utool # utool.embed() query_results = {} for key, (qaids, daids) in ut.ProgressIter( species2_expanded_aids.items(), prog_hook=prog_hook ): prog_bar.setWindowTitle('Initialize %r query' % (key,)) qreq_ = back.ibs.new_query_request(qaids, daids, cfgdict=cfgdict) prog_hook.initialize_subhooks(1) subhook = prog_hook.next_subhook() cm_list = qreq_.execute(prog_hook=subhook) query_results[key] = (cm_list, qreq_) # HACK IN IMAGESET INFO if daids_mode == const.INTRA_OCCUR_KEY: for cm in cm_list: # if cm is not None: cm.imgsetid = imgsetid back.front.prog_bar.setVisible(False) logger.info('[back] About to finish compute_queries: imgsetid=%r' % (imgsetid,)) for key in query_results.keys(): (cm_list, qreq_) = query_results[key] # Filter duplicate names if running vsexemplar back.review_queries( cm_list, qreq_=qreq_, query_title=query_title + ' ' + str(key), review_cfg=review_cfg, ) if refresh: back.front.update_tables() logger.info('[back] FINISHED compute_queries: imgsetid=%r' % (imgsetid,)) def get_selected_daids( back, imgsetid=None, daids_mode=None, qaid_list=None, species=None ): daids_mode = back.daids_mode if daids_mode is None else daids_mode daids_mode_valid_kw_dict = { const.VS_EXEMPLARS_KEY: {'is_exemplar': True}, const.INTRA_OCCUR_KEY: {'imgsetid': imgsetid}, 'all': {}, } if qaid_list is not None and species is None: ibs = back.ibs hist_ = ut.dict_hist(ibs.get_annot_species_texts(qaid_list)) logger.info('[back] len(qaid_list)=%r' % (len(qaid_list))) logger.info('[back] hist_ = %r' % (hist_,)) if len(hist_) == 1: # select the query species if there is only one species = hist_.keys()[0] if species is None: species = back.get_selected_species() valid_kw = { 'minqual': 'ok', } if species != const.UNKNOWN: # Query everything if you don't know the species valid_kw['species'] = species mode_str = { const.VS_EXEMPLARS_KEY: 'vs_exemplar', const.INTRA_OCCUR_KEY: 'intra_occurrence', 'all': 'all', }[daids_mode] valid_kw.update(daids_mode_valid_kw_dict[daids_mode]) logger.info('[back] get_selected_daids: ' + mode_str) logger.info('[back] ... 
valid_kw = ' + ut.repr2(valid_kw)) daid_list = back.ibs.get_valid_aids(**valid_kw) return daid_list def make_confirm_query_msg2( back, species2_expanded_aids, cfgdict=None, query_msg=None, query_title=None ): r""" CommandLine: python -m wbia.gui.guiback --test-MainWindowBackend.make_confirm_query_msg2 --show Example: >>> # xdoctest: +REQUIRES(--gui) >>> from wbia.gui.guiback import * # NOQA >>> import wbia >>> main_locals = wbia.main(defaultdb='testdb1') >>> ibs, back = ut.dict_take(main_locals, ['ibs', 'back']) >>> ut.exec_funckw(back.make_confirm_query_msg2, globals()) >>> imgsetid = ibs.get_imageset_imgsetids_from_text('*All Images') >>> species2_expanded_aids = back._get_expanded_aids_groups(imgsetid) >>> short_msg, detailed_msg = back.make_confirm_query_msg2(species2_expanded_aids) >>> print(short_msg) >>> print(detailed_msg) >>> ut.quit_if_noshow() >>> back.confirm_query_dialog2(species2_expanded_aids) """ ibs = back.ibs species_text = ibs.get_all_species_texts() species_nice = ibs.get_all_species_nice() species_dict = dict(zip(species_text, species_nice)) def get_unique_species_phrase(aid_list): def boldspecies(species): species_bold_nice = "'%s'" % (species_dict.get(species, species).upper(),) return species_bold_nice species_list = list(set(ibs.get_annot_species_texts(aid_list))) species_nice_list = list(map(boldspecies, species_list)) species_phrase = ut.conj_phrase(species_nice_list, 'and') return species_phrase # Build confirmation message fmtdict = dict() msg_fmtstr_list = [] if query_msg is not None: msg_fmtstr_list += [query_msg] if query_title is None: query_title = 'custom' ngroups = len(species2_expanded_aids) if ngroups > 1: msg_fmtstr_list += [ ( 'You are about to run {query_title} ' 'identification with {ngroups} groups...' ).format(query_title=query_title, ngroups=ngroups) ] else: msg_fmtstr_list += [ ('You are about to run {query_title} ' 'identification...').format( query_title=query_title, ) ] species_list = list(species2_expanded_aids.keys()) detailed_msg_list = [] annotstats_kw = {} for count, species in enumerate(species_list): qaids, daids = species2_expanded_aids[species] species_nice = species_dict.get(species, species) # species_phrase = get_unique_species_phrase(qaids + daids) msg_fmtstr_list += [''] fmtdict = {} qaid_stats = ibs.get_annot_stats_dict( qaids, prefix='q', per_name=True, old=False ) daid_stats = ibs.get_annot_stats_dict( daids, prefix='d', per_name=True, old=False ) stats_ = ibs.get_annotconfig_stats( qaids, daids, combined=False, species_hist=True, **annotstats_kw ) fmtdict.update(**qaid_stats) fmtdict.update(**daid_stats) fmtdict['qannots'] = ut.pluralize('annotation', len(qaids)) fmtdict['dannots'] = ut.pluralize('annotation', len(daids)) fmtdict['species_nice'] = species_nice fmtdict['count'] = count # Add simple info if ngroups > 1: part1 = 'Group {count} ' else: part1 = 'This ' part2 = ( part1 + 'will identify {num_qaids} query {qannots} against {num_daids} {species_nice} database {dannots}.' 
).format(**fmtdict) msg_fmtstr_list += [part2] # Add detailed info stats_str2 = ut.repr2( stats_, strvals=True, newlines=2, explicit=False, nobraces=False ) detailed_msg_list.append('--- Group %d ---' % (count,)) detailed_msg_list.append(stats_str2) # Finish building confirmation message msg_fmtstr_list += [''] msg_fmtstr_list += ["Press 'Yes' to continue"] msg_fmtstr = '\n'.join(msg_fmtstr_list) msg_str = msg_fmtstr.format(**fmtdict) if cfgdict is not None and len(cfgdict) > 0: detailed_msg_list = [ 'Special Settings: {}'.format(ut.repr2(cfgdict)) ] + detailed_msg_list detailed_msg = '\n'.join(detailed_msg_list) return msg_str, detailed_msg def run_annot_splits(back, aid_list): """ Checks for mismatches within a group of annotations Args: aid_list (int): list of annotation ids CommandLine: python -m wbia.gui.guiback --test-MainWindowBackend.run_annot_splits --show Example: >>> # xdoctest: +REQUIRES(--gui) >>> from wbia.gui.guiback import * # NOQA >>> back = testdata_guiback() >>> ibs = back.ibs >>> aids_list, nids = back.ibs.group_annots_by_name(back.ibs.get_valid_aids()) >>> aid_list = aids_list[ut.list_argmax(list(map(len, aids_list)))] >>> back.run_annot_splits(aid_list) >>> ut.quit_if_noshow() >>> guitool.qtapp_loop(back.mainwin, frequency=100) """ cfgdict = { 'can_match_samename': True, 'K': 3, 'Knorm': 3, 'prescore_method': 'csum', 'score_method': 'csum', } ranks_top = min(len(aid_list), 10) review_cfg = { 'filter_reviewed': False, 'ranks_top': ranks_top, 'name_scoring': False, } ibs = back.ibs cfgdict, review_cfg = back.confirm_query_dialog2( {'split': (aid_list, aid_list)}, cfgdict=cfgdict, query_msg='Checking for SPLIT cases (matching each annotation within a name)', review_cfg=review_cfg, ) qreq_ = ibs.new_query_request(aid_list, aid_list, cfgdict=cfgdict) cm_list = qreq_.execute() back.review_queries( cm_list, qreq_=qreq_, query_title='Annot Splits', review_cfg=review_cfg ) if False: from wbia.viz import viz_graph2 import imp imp.reload(viz_graph2) win = viz_graph2.make_qt_graph_review(qreq_, cm_list, review_cfg=review_cfg) win.show() def run_merge_checks(back): r""" Checks for missed matches within a group of annotations CommandLine: python -m wbia.gui.guiback --test-run_merge_checks --show Example: >>> # xdoctest: +REQUIRES(--gui) >>> from wbia.gui.guiback import * # NOQA >>> back = testdata_guiback() >>> result = back.run_merge_checks() >>> print(result) >>> ut.quit_if_noshow() >>> guitool.qtapp_loop(back.mainwin, frequency=100) """ pass qaid_list = back.ibs.get_valid_aids(is_exemplar=True) cfgdict = { 'can_match_samename': False, # 'K': 3, # 'Knorm': 3, # 'prescore_method': 'csum', # 'score_method': 'csum' } query_msg = 'Checking for MERGE cases (this is an exemplars-vs-exemplars query)' back.compute_queries( qaid_list=qaid_list, daids_mode=const.VS_EXEMPLARS_KEY, query_msg=query_msg, cfgdict=cfgdict, custom_qaid_list_title='Merge Candidates', ) def run_merge_checks_multitons(back): r""" Checks for missed matches within a group of annotations. Only uses annotations with more 2 annots per id. 
""" pass ibs = back.ibs # qaid_list = back.ibs.get_valid_aids(is_exemplar=True) from wbia import dtool config = dtool.Config.from_dict( { 'K': 1, 'Knorm': 5, 'min_pername': 1, 'max_pername': 1, 'exemplars_per_name': 1, 'method': 'randomize', 'seed': 42, } ) # ibswgt = None dlg = gt.ConfigConfirmWidget.as_dialog( title='Confirm Merge Query', msg='Confirm', config=config ) self = dlg.widget dlg.resize(700, 500) dlg.exec_() logger.info('config = %r' % (config,)) updated_config = self.config # NOQA logger.info('updated_config = %r' % (updated_config,)) min_pername = updated_config['min_pername'] max_pername = updated_config['max_pername'] aid_list = ibs.filter_annots_general( min_pername=min_pername, max_pername=max_pername, minqual='ok' ) if updated_config['method'] == 'randomize': import numpy as np rng = np.random.RandomState(int(updated_config['seed'])) grouped_aids = ibs.group_annots_by_name(aid_list)[0] grouped_aids2 = [ ut.random_sample(aids, updated_config['exemplars_per_name'], rng=rng) for aids in grouped_aids ] aid_list = ut.flatten(grouped_aids2) else: new_flag_list = ibs.get_annot_quality_viewpoint_subset( aid_list, updated_config['exemplars_per_name'], allow_unknown=True ) aid_list = ut.compress(aid_list, new_flag_list) ibs.print_annot_stats(aid_list) daid_list = qaid_list = aid_list # len(aids) cfgdict = { 'can_match_samename': False, 'K': updated_config['K'], 'Knorm': updated_config['Knorm'], # 'prescore_method': 'csum', # 'score_method': 'csum' } query_msg = 'Checking for MERGE cases (this is an special query)' back.compute_queries( qaid_list=qaid_list, daid_list=daid_list, query_msg=query_msg, cfgdict=cfgdict, custom_qaid_list_title='Merge2 Candidates', ) def _get_expanded_aids_groups( back, imgsetid, daids_mode=None, use_prioritized_name_subset=False, use_visual_selection=False, qaid_list=None, daid_list=None, query_is_known=None, partition_queries_by_species=True, remove_unknown_species=None, ): """ Get the query annotation ids to search and the database annotation ids to be searched The query is either a specific selection or everything from this image set that matches the appropriate filters. Example: >>> # DISABLE_DOCTEST >>> ut.exec_funckw(back._get_expanded_aids_groups, globals()) >>> imgsetid = ibs.get_imageset_imgsetids_from_text('*All Images') >>> species2_expanded_aids = back._get_expanded_aids_groups(imgsetid) """ ibs = back.ibs daids_mode = back.daids_mode if daids_mode is None else daids_mode if imgsetid is None: raise Exception('[back] invalid imgsetid') if ibs.cfg.other_cfg.enable_custom_filter: back.user_warning( msg=ut.codeblock( """ other_cfg.enable_custom_filter=True is not longer supported. 
Please turn off in Preferences """ ) ) if remove_unknown_species is None: # Default behavior is don't remove unknown species if qaid_list is specified remove_unknown_species = qaid_list is not None or use_visual_selection # Query aids are either: given, taken from gui selection, or by imageset if qaid_list is not None: qaid_list = qaid_list elif use_visual_selection: qaid_list = back.get_selected_aids() else: qaid_list = ibs.get_valid_aids( imgsetid=imgsetid, is_known=query_is_known, minqual='ok' ) logger.info('[back] Initially loaded len(qaid_list) = %r' % (len(qaid_list),)) if use_prioritized_name_subset: # Pick only a few queries per name to execute annots_per_view = 2 # FIXME: use a configuration new_flag_list = back.ibs.get_annot_quality_viewpoint_subset( aid_list=qaid_list, annots_per_view=annots_per_view, allow_unknown=True, verbose=True, ) qaid_list = ut.compress(qaid_list, new_flag_list) logger.info( '[back] Filtered query by quality and viewpoint: len(qaid_list) = %r' % (len(qaid_list),) ) logger.info('[back] Found len(qaid_list) = %r' % (len(qaid_list),))
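# --- Hedged sketch (not from the source): partitioning query/database annotation ids by
# species, in the spirit of the species2_expanded_aids groups built above. A plain dict
# stands in for the ibs annotation lookup; the species names and aid values are
# illustrative assumptions only.
from collections import defaultdict

def group_expanded_aids(qaid_list, daid_list, species_of):
    groups = defaultdict(lambda: ([], []))
    for aid in qaid_list:
        groups[species_of[aid]][0].append(aid)
    for aid in daid_list:
        groups[species_of[aid]][1].append(aid)
    # Keep only species that actually have queries to run.
    return {sp: (q, d) for sp, (q, d) in groups.items() if q}

if __name__ == '__main__':
    species_of = {1: 'zebra', 2: 'zebra', 3: 'giraffe', 4: 'giraffe'}
    print(group_expanded_aids([1, 3], [1, 2, 3, 4], species_of))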
+ L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t f = L_dash_d_t > 0 L_dashdash_ba1_d_t[f] = L_dash_ba1_d_t[f] - L_sun_d_t[f] * (L_dash_ba1_d_t[f] / L_dash_d_t[f]) return L_dashdash_ba1_d_t def get_L_dashdash_ba2_d_t(L_dash_ba2_d_t): """1時間当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/h) (4g) Args: L_dash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯負荷 (MJ/h) Returns: 1時間当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/h) """ return L_dash_ba2_d_t def calc_L_sun_d_t(region, sol_region=None, solar_device=None, ls_type=None, A_sp=None, P_alpha_sp=None, P_beta_sp=None, W_tnk_ss=None, hotwater_use=None, heating_flag_d=None, A_col=None, P_alpha=None, P_beta=None, V_fan_P0=None, d0=None, d1=None, m_fan_test=None, W_tnk_ass=None, Theta_wtr_d=None, L_dash_k_d_t=None, L_dash_s_d_t=None, L_dash_w_d_t=None, L_dash_b1_d_t=None, L_dash_b2_d_t=None, L_dash_ba1_d_t=None): """太陽熱利用給湯設備による補正集熱量 Args: region(int): 省エネルギー地域区分 sol_region(int, optional): 年間の日射地域区分 (Default value = None) solar_device(str, optional): 太陽熱利用設備の種類 (液体集熱式,空気集熱式,None) (Default value = None) ls_type(str, optional): 液体集熱式太陽熱利用設備の種類 (太陽熱温水器,ソーラーシステム) (Default value = None) A_sp(float, optional): 太陽熱集熱部の有効集熱面積 (m2) (Default value = None) P_alpha_sp(float, optional): 太陽熱集熱部の方位角 (°) (Default value = None) P_beta_sp(float, optional): 太陽熱集熱部の傾斜角 (°) (Default value = None) W_tnk_ss(float, optional): ソーラーシステムのタンク容量 (L) (Default value = None) W_tnk_ass(float, optional): タンク容量 (L) (Default value = None) Theta_wtr_d(ndarray, optional): 日平均給水温度 (℃) (Default value = None) L_dash_k_d_t(ndarrayL, optional): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h) (Default value = None) L_dash_s_d_t(ndarray, optional): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h) (Default value = None) L_dash_w_d_t(ndarray, optional): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h) (Default value = None) L_dash_b1_d_t(ndarray, optional): 1時間当たりの浴槽水栓湯はりにおける節湯補正給湯熱負荷 (MJ/h) (Default value = None) L_dash_b2_d_t(ndarray, optional): 1時間当たりの浴槽自動湯はりにおける節湯補正給湯熱負荷 (MJ/h) (Default value = None) L_dash_ba1_d_t(ndarray, optional): 1時間当たりの浴槽水栓さし湯における節湯補正給湯熱負荷 (MJ/h) (Default value = None) hotwater_use: Default value = None) heating_flag_d: Default value = None) A_col: Default value = None) P_alpha: Default value = None) P_beta: Default value = None) V_fan_P0: Default value = None) d0: Default value = None) d1: Default value = None) m_fan_test: Default value = None) Returns: ndarray: 1時間当たりの太陽熱利用設備による補正集熱量 (MJ/h) """ if solar_device == '液体集熱式': return lss.calc_L_sun_lss_d_t( region=region, sol_region=sol_region, ls_type=ls_type, A_sp=A_sp, P_alpha_sp=P_alpha_sp, P_beta_sp=P_beta_sp, W_tnk_ss=W_tnk_ss, Theta_wtr_d=Theta_wtr_d, L_dash_k_d_t=L_dash_k_d_t, L_dash_s_d_t=L_dash_s_d_t, L_dash_w_d_t=L_dash_w_d_t, L_dash_b1_d_t=L_dash_b1_d_t, L_dash_b2_d_t=L_dash_b2_d_t, L_dash_ba1_d_t=L_dash_ba1_d_t ) elif solar_device == '空気集熱式': if hotwater_use == True: outdoor = load_outdoor() Theta_ex_d_t = get_Theta_ex(region, outdoor) Theta_col_nonopg_d_t, Theta_col_opg_d_t = ass.calc_Theta_col(A_col, P_alpha, P_beta, V_fan_P0, d0, d1, m_fan_test, region, sol_region, Theta_ex_d_t) t_fan_d_t = ass.get_t_fan_d_t(Theta_col_nonopg_d_t, Theta_col_opg_d_t) t_cp_d_t = ass.get_t_cp_d_t(hotwater_use, t_fan_d_t, heating_flag_d) V_fan_d_t = ass.get_V_fan_d_t(t_fan_d_t, V_fan_P0) Q_col_d_t = ass.get_Q_col_d_t(V_fan_d_t, Theta_col_opg_d_t, Theta_ex_d_t) Q_d = ass.calc_Q_d(Q_col_d_t, t_cp_d_t) L_tnk_d = ass.calc_L_tnk_d(Q_d, W_tnk_ass, Theta_wtr_d) return ass.calc_L_sun_ass_d_t(L_tnk_d, L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t) else: return 
np.zeros(24 * 365) elif solar_device is None: return np.zeros(24 * 365) else: raise ValueError(solar_device) # ============================================================================ # 8. 節湯補正給湯熱負荷 # ============================================================================ def get_L_dash_k_d_t(W_dash_k_d_t, Theta_sw_k, Theta_wtr_d): """台所水栓における節湯補正給湯負荷 (MJ/h) (5a) Args: W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h) Theta_sw_k(int): 台所水栓における基給湯量 (℃) Theta_wtr_d(ndarray): 日平均給水温度 (℃) Returns: ndarray: 台所水栓における節湯補正給湯負荷 (MJ/h) """ return W_dash_k_d_t * (Theta_sw_k - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3) def get_L_dash_s_d_t(W_dash_s_d_t, Theta_sw_s, Theta_wtr_d): """浴室シャワー水栓における節湯補正給湯負荷 (5b) Args: W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワーにおける節湯補正給湯量 (L/h) Theta_sw_s(int): 浴室シャワーにおける基給湯量 (℃) Theta_wtr_d(ndarray): 日平均給水温度 (℃) Returns: ndarray: 浴室シャワーにおける節湯補正給湯負荷 (MJ/h) """ return W_dash_s_d_t * (Theta_sw_s - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3) def get_L_dash_w_d_t(W_dash_w_d_t, Theta_sw_w, Theta_wtr_d): """洗面水栓における節湯補正給湯負荷 (5c) Args: W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/d) Theta_sw_w(int): 洗面水栓における基給湯量 (℃) Theta_wtr_d(ndarray): 日平均給水温度 (℃) Returns: ndarray: 洗面水栓における節湯補正給湯負荷 (MJ/d) """ return W_dash_w_d_t * (Theta_sw_w - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3) def get_L_dash_bx_d_t(W_dash_b1_d_t, W_dash_b2_d_t, Theta_wtr_d, has_bath, bash_function): """浴槽水栓湯はり時における節水補正給湯熱負荷 L_dash_b1_d, L_dash_b2_d Args: W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/d) W_dash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における節湯補正給湯量 (L/d) Theta_wtr_d(ndarray): 日平均給水温度 (℃) has_bath(bool): 浴室用の有無 bash_function(str): ふろ機能の種類 Returns: ndarray: 浴槽水栓湯はり時・浴槽自動湯はり時における節水補正給湯熱負荷 (MJ/d) """ if has_bath == False: L_dash_b1_d_t = np.zeros(24 * 365) # (5-1d) L_dash_b2_d_t = np.zeros(24 * 365) # (5-1e) return L_dash_b1_d_t, L_dash_b2_d_t elif bash_function == '給湯単機能': Theta_sw_b1 = get_Theta_sw_b1() L_dash_b1_d_t = W_dash_b1_d_t * (Theta_sw_b1 - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3) # (5-2d) L_dash_b2_d_t = np.zeros(24 * 365) # (5-2e) return L_dash_b1_d_t, L_dash_b2_d_t elif bash_function == 'ふろ給湯機(追焚あり)' or bash_function == 'ふろ給湯機(追焚なし)': Theta_sw_b2 = get_Theta_sw_b2() L_dash_b1_d_t = np.zeros(24 * 365) # (5-3d) L_dash_b2_d_t = W_dash_b2_d_t * (Theta_sw_b2 - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3) # (5-3e) return L_dash_b1_d_t, L_dash_b2_d_t else: raise ValueError(bash_function) def get_L_dash_bax_d_t(W_dash_ba1_d_t, Theta_wtr_d, L_ba_d_t, has_bath, bash_function): """浴槽水栓さし湯時における節水補正給湯熱負荷 L_dash_ba1_d, L_dash_ba2_d Args: W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h) Theta_wtr_d(ndarray): 日平均給水温度 (℃) L_ba_d_t(ndarray): 1時間当たりの浴槽沸かし直しによる給湯熱負荷 (MJ/h) has_bath(bool): 浴室等の有無 bash_function(str): ふろ機能の種類 (給湯単機能,ふろ給湯機(追焚なし),ふろ給湯機(追焚あり)) Returns: ndarray: 浴槽水栓さし湯時/浴槽追焚時における節水補正給湯熱負荷 (MJ/h) """ if has_bath == False: L_dash_ba1_d_t = np.zeros(24 * 365) # (5-1f) L_dash_ba2_d_t = np.zeros(24 * 365) # (5-1g) return L_dash_ba1_d_t, L_dash_ba2_d_t elif bash_function == '給湯単機能' or bash_function == 'ふろ給湯機(追焚なし)': Theta_sw_ba1 = get_Theta_sw_ba1() L_dash_ba1_d_t = W_dash_ba1_d_t * (Theta_sw_ba1 - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3) # (5-2f) L_dash_ba2_d_t = np.zeros(24 * 365) # (5-2g) return L_dash_ba1_d_t, L_dash_ba2_d_t elif bash_function == 'ふろ給湯機(追焚あり)': L_dash_ba1_d_t = np.zeros(24 * 365) # (5-3f) L_dash_ba2_d_t = L_ba_d_t * 1.25 # (5-3g) return L_dash_ba1_d_t, L_dash_ba2_d_t else: raise ValueError(bash_function) def get_Theta_sw_k(): 
"""台所水栓の基準給湯温度 Args: Returns: int: 台所水栓の基準給湯温度 """ return get_table_5()[0] def get_Theta_sw_s(): """浴室シャワー水栓の基準給湯温度 Args: Returns: int: 浴室シャワー水栓の基準給湯温度 """ return get_table_5()[1] def get_Theta_sw_w(): """洗面水栓の基準給湯温度 Args: Returns: int: 洗面水栓の基準給湯温度 """ return get_table_5()[2] def get_Theta_sw_b1(): """浴槽水栓湯はりの基準給湯温度 Args: Returns: int: 浴槽水栓湯はりの基準給湯温度 """ return get_table_5()[3] def get_Theta_sw_b2(): """浴槽自動湯はりの基準給湯温度 Args: Returns: int: 浴槽自動湯はりの基準給湯温度 """ return get_table_5()[4] def get_Theta_sw_ba1(): """浴槽水栓さし湯の基準給湯温度 Args: Returns: int: 浴槽水栓さし湯の基準給湯温度 """ return get_table_5()[5] def get_table_5(): """表 5 用途ごとの基準給湯温度 Args: Returns: list: 用途ごとの基準給湯温度 """ table_5 = [ 40, 40, 40, 40, 40, 60 ] return table_5 # ============================================================================ # 9. 節湯補正給湯量 # ============================================================================ def calc_W_dash_k_d_t(W_k_d_t, kitchen_watersaving_A, kitchen_watersaving_C, pipe_diameter, Theta_wtr_d): """1時間当たりの台所水栓における節湯補正給湯量 [L/h] (6a) Args: W_k_d_t(ndarray): 1時間当たりの台所水栓における基準給湯量 (L/h) kitchen_watersaving_A(bool): 台所水栓の手元止水機能の有無 kitchen_watersaving_C(bool): 台所水栓の水優先吐水機能の有無 pipe_diameter(str): ヘッダー分岐後の径 Theta_wtr_d(ndarray): 日平均給水温度 (℃) Returns: ndarray: 1時間当たりの台所水栓における節湯補正給湯量 (L/h) """ # 台所水栓における節湯の効果係数 f_sk = watersaving.get_f_sk(kitchen_watersaving_A, kitchen_watersaving_C, Theta_wtr_d) # 配管における節湯の効果係数 f_sp = watersaving.get_f_sp(pipe_diameter) return W_k_d_t * np.repeat(f_sk, 24) * f_sp def calc_W_dash_s_d_t(W_s_d_t, shower_watersaving_A, shower_watersaving_B, pipe_diameter): """1時間当たりの浴室シャワーにおける節湯補正給湯量 (L/h) (6a) Args: W_s_d_t(ndarray): 浴室シャワーにおける基準給湯量 (L/h) shower_watersaving_A(bool): 浴室シャワー水栓の手元止水機能の有無 shower_watersaving_B(bool): 浴室シャワー水栓の小流量吐水機能の有無 pipe_diameter(str): ヘッダー分岐後の径 Returns: ndarray: 1時間当たりの浴室シャワーにおける節湯補正給湯量 (L/h) """ # 浴室シャワー水栓のける節湯の効果係数 f_ss = watersaving.get_f_ss(shower_watersaving_A, shower_watersaving_B) # 配管における節湯の効果係数 f_sp = watersaving.get_f_sp(pipe_diameter) return W_s_d_t * f_ss * f_sp def calc_W_dash_w_d_t(W_w_d_t, washbowl_watersaving_C, pipe_diameter, Theta_wtr_d): """1時間当たりの台所水栓における節湯補正給湯量 (L/h) (6c) Args: W_w_d_t(ndarray): 台所水栓における基準給湯量 (L/h) washbowl_watersaving_C(bool): 洗面水栓の水優先吐水機能の有無 pipe_diameter(str): ヘッダー分岐後の径 Theta_wtr_d(ndarray): 日平均給水温度 (℃) Returns: ndarray: 1時間当たりの台所水栓における節湯補正給湯量 (L/h) """ # 配管における節湯の効果係数 f_sp = watersaving.get_f_sp(pipe_diameter) # 洗面水栓における節湯の効果係数 f_sw = watersaving.get_f_sw(washbowl_watersaving_C, Theta_wtr_d) return W_w_d_t * np.repeat(f_sw, 24) * f_sp def calc_W_dash_b1_d_t(W_b1_d_t, pipe_diameter): """1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h) (6d) Args: W_b1_d_t(ndarray): 浴槽水栓湯はり時における基準給湯量 (L/h) pipe_diameter(str): ヘッダー分岐後の径 Returns: ndarray: 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h) """ # 配管における節湯の効果係数 f_sp = watersaving.get_f_sp(pipe_diameter) # 浴槽における節湯の効果係数 f_sb = watersaving.get_f_sb() return W_b1_d_t * f_sp * f_sb def calc_W_dash_b2_d_t(W_b2_d_t): """1時間当たりの浴槽自動湯はり時における節湯補正給湯量 (L/h) (6e) Args: W_b2_d_t(ndarray): 浴槽自動湯はり時における基準給湯量 (L/h) Returns: ndarray: 1時間当たりの浴槽自動湯はり時における節湯補正給湯量 (L/h) """ # 浴槽における節湯の効果係数 f_sb = watersaving.get_f_sb() return W_b2_d_t * f_sb def calc_W_dash_ba1_d_t(W_ba1_d_t, pipe_diameter): """1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h) (6f) Args: W_ba1_d_t(ndarray): 1時間当たりの浴室水栓さし湯時における基準給湯量 (L/h) pipe_diameter(str): ヘッダー分岐後の径 Returns: 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h) """ # 配管における節湯の効果係数 f_sp = watersaving.get_f_sp(pipe_diameter) return W_ba1_d_t * f_sp # ============================================================================ # 10. 
基準給湯量 # ============================================================================ def calc_W_k_d_t(n_p, schedule_hw): """1時間当たりの台所水栓における基準給湯量 (L/h) (7a) Args: n_p(float): 仮想居住人数 (人) schedule_hw(ndarray): 給湯スケジュール Returns: ndarray: 1時間当たりの台所水栓における基準給湯量 (L/h) """ if n_p in [1, 2, 3, 4]: return calc_W_k_p_d_t(n_p, schedule_hw) elif 1 <= n_p and n_p <= 2: W_k_1_d_t = calc_W_k_p_d_t(1, schedule_hw) W_k_2_d_t = calc_W_k_p_d_t(2, schedule_hw) return W_k_1_d_t * (2 - n_p) / (2 - 1) + W_k_2_d_t * (n_p - 1) / (2 - 1) elif 2 <= n_p and n_p <= 3: W_k_2_d_t = calc_W_k_p_d_t(2, schedule_hw) W_k_3_d_t = calc_W_k_p_d_t(3, schedule_hw) return W_k_2_d_t * (3 - n_p) / (3 - 2) + W_k_3_d_t * (n_p - 2) / (3 - 2) elif 3 <= n_p and n_p <= 4: W_k_3_d_t = calc_W_k_p_d_t(3, schedule_hw) W_k_4_d_t = calc_W_k_p_d_t(4, schedule_hw) return W_k_3_d_t * (4 - n_p) / (4 - 3) + W_k_4_d_t * (n_p - 3) / (4 - 3) def calc_W_s_d_t(n_p, schedule_hw, has_bath): """1時間当たりの浴室シャワー水栓における基準給湯量 (7b) Args: n_p(float): 仮想居住人数 (人) schedule_hw(ndarray): 給湯スケジュール has_bath(bool): 浴室等の有無 Returns: ndarray: 1時間当たりの浴室シャワー水栓における基準給湯量 (L/h) """ if n_p in [1, 2, 3, 4]: return calc_W_s_p_d_t(n_p, schedule_hw, has_bath) elif 1 <= n_p and n_p <= 2: W_s_1_d_t = calc_W_s_p_d_t(1, schedule_hw, has_bath) W_s_2_d_t = calc_W_s_p_d_t(2, schedule_hw, has_bath) return W_s_1_d_t * (2 - n_p) / (2 - 1) + W_s_2_d_t * (n_p - 1) / (2 - 1) elif 2 <= n_p and n_p <= 3: W_s_2_d_t = calc_W_s_p_d_t(2, schedule_hw, has_bath) W_s_3_d_t = calc_W_s_p_d_t(3, schedule_hw, has_bath) return W_s_2_d_t * (3 - n_p) / (3 - 2) + W_s_3_d_t * (n_p - 2) / (3 - 2) elif 3 <= n_p and n_p <= 4: W_s_3_d_t = calc_W_s_p_d_t(3, schedule_hw, has_bath) W_s_4_d_t = calc_W_s_p_d_t(4, schedule_hw, has_bath) return W_s_3_d_t * (4 - n_p) / (4 - 3) + W_s_4_d_t * (n_p - 3) / (4 - 3) def calc_W_w_d_t(n_p, schedule_hw): """1時間当たりの洗面水栓における基準給湯量 (7c) Args: n_p(float): 仮想居住人数 (人) schedule_hw(ndarray): 給湯スケジュール Returns: ndarray: 1時間当たりの洗面水栓における基準給湯量 (L/h) """ if n_p in [1, 2, 3, 4]: return calc_W_w_p_d_t(n_p, schedule_hw) elif 1 <= n_p and n_p <= 2: W_w_1_d_t = calc_W_w_p_d_t(1, schedule_hw) W_w_2_d_t = calc_W_w_p_d_t(2, schedule_hw) return W_w_1_d_t * (2 -
Per context we keep track of one current program. """ if self._handle != self._parser.env.get('current_program', False): self._parser.env['current_program'] = self._handle gl.glUseProgram(self._handle) def deactivate(self): """Avoid overhead in calling glUseProgram with same arg. Warning: this will break if glUseProgram is used somewhere else. Per context we keep track of one current program. """ if self._parser.env.get('current_program', 0) != 0: self._parser.env['current_program'] = 0 gl.glUseProgram(0) def set_shaders(self, vert, frag): """This function takes care of setting the shading code and compiling+linking it into a working program object that is ready to use. """ self._linked = False # For both vertex and fragment shader: set source, compile, check for code, type_ in [(vert, 'vertex'), (frag, 'fragment')]: self.attach_shader(code, type_) self.link_program() def attach(self, id_): """Attach a shader to this program.""" shader = self._parser.get_object(id_) gl.glAttachShader(self._handle, shader.handle) self._attached_shaders.append(shader) def link_program(self): """Link the complete program and check. All shaders are detached and deleted if the program was successfully linked. """ gl.glLinkProgram(self._handle) if not gl.glGetProgramParameter(self._handle, gl.GL_LINK_STATUS): raise RuntimeError('Program linking error:\n%s' % gl.glGetProgramInfoLog(self._handle)) # Detach all shaders to prepare them for deletion (they are no longer # needed after linking is complete) for shader in self._attached_shaders: gl.glDetachShader(self._handle, shader.handle) self._attached_shaders = [] # Now we know what variables will be used by the program self._unset_variables = self._get_active_attributes_and_uniforms() self._handles = {} self._known_invalid = set() self._linked = True def _get_active_attributes_and_uniforms(self): """Retrieve active attributes and uniforms to be able to check that all uniforms/attributes are set by the user. Other GLIR implementations may omit this. """ # This match a name of the form "name[size]" (= array) regex = re.compile(r"""(?P<name>\w+)\s*(\[(?P<size>\d+)\])\s*""") # Get how many active attributes and uniforms there are cu = gl.glGetProgramParameter(self._handle, gl.GL_ACTIVE_UNIFORMS) ca = gl.glGetProgramParameter(self.handle, gl.GL_ACTIVE_ATTRIBUTES) # Get info on each one attributes = [] uniforms = [] for container, count, func in [(attributes, ca, gl.glGetActiveAttrib), (uniforms, cu, gl.glGetActiveUniform)]: for i in range(count): name, size, gtype = func(self._handle, i) m = regex.match(name) # Check if xxx[0] instead of xx if m: name = m.group('name') for i in range(size): container.append(('%s[%d]' % (name, i), gtype)) else: container.append((name, gtype)) # return attributes, uniforms return set([v[0] for v in attributes] + [v[0] for v in uniforms]) def set_texture(self, name, value): """Set a texture sampler. Value is the id of the texture to link.""" if not self._linked: raise RuntimeError('Cannot set uniform when program has no code') # Get handle for the uniform, first try cache handle = self._handles.get(name, -1) if handle < 0: if name in self._known_invalid: return handle = gl.glGetUniformLocation(self._handle, name) self._unset_variables.discard(name) # Mark as set self._handles[name] = handle # Store in cache if handle < 0: self._known_invalid.add(name) logger.info('Not setting texture data for variable %s; ' 'uniform is not active.' 
% name) return # Program needs to be active in order to set uniforms self.activate() if True: # Sampler: the value is the id of the texture tex = self._parser.get_object(value) if tex == JUST_DELETED: return if tex is None: raise RuntimeError('Could not find texture with id %i' % value) unit = len(self._samplers) if name in self._samplers: unit = self._samplers[name][-1] # Use existing unit self._samplers[name] = tex._target, tex.handle, unit gl.glUniform1i(handle, unit) def set_uniform(self, name, type_, value): """Set a uniform value. Value is assumed to have been checked.""" if not self._linked: raise RuntimeError('Cannot set uniform when program has no code') # Get handle for the uniform, first try cache handle = self._handles.get(name, -1) count = 1 if handle < 0: if name in self._known_invalid: return handle = gl.glGetUniformLocation(self._handle, name) self._unset_variables.discard(name) # Mark as set # if we set a uniform_array, mark all as set if not type_.startswith('mat'): count = value.nbytes // (4 * self.ATYPEINFO[type_][0]) if count > 1: for ii in range(count): if '%s[%s]' % (name, ii) in self._unset_variables: self._unset_variables.discard('%s[%s]' % (name, ii)) self._handles[name] = handle # Store in cache if handle < 0: self._known_invalid.add(name) logger.info('Not setting value for variable %s %s; ' 'uniform is not active.' % (type_, name)) return # Look up function to call funcname = self.UTYPEMAP[type_] func = getattr(gl, funcname) # Program needs to be active in order to set uniforms self.activate() # Triage depending on type if type_.startswith('mat'): # Value is matrix, these gl funcs have alternative signature transpose = False # OpenGL ES 2.0 does not support transpose func(handle, 1, transpose, value) else: # Regular uniform func(handle, count, value) def set_attribute(self, name, type_, value): """Set an attribute value. Value is assumed to have been checked.""" if not self._linked: raise RuntimeError('Cannot set attribute when program has no code') # Get handle for the attribute, first try cache handle = self._handles.get(name, -1) if handle < 0: if name in self._known_invalid: return handle = gl.glGetAttribLocation(self._handle, name) self._unset_variables.discard(name) # Mark as set self._handles[name] = handle # Store in cache if handle < 0: self._known_invalid.add(name) if value[0] != 0 and value[2] > 0: # VBO with offset return # Probably an unused element in a structured VBO logger.info('Not setting data for variable %s %s; ' 'attribute is not active.' 
% (type_, name)) return # Program needs to be active in order to set uniforms self.activate() # Triage depending on VBO or tuple data if value[0] == 0: # Look up function call funcname = self.ATYPEMAP[type_] func = getattr(gl, funcname) # Set data self._attributes[name] = 0, handle, func, value[1:] else: # Get meta data vbo_id, stride, offset = value size, gtype, dtype = self.ATYPEINFO[type_] # Get associated VBO vbo = self._parser.get_object(vbo_id) if vbo == JUST_DELETED: return if vbo is None: raise RuntimeError('Could not find VBO with id %i' % vbo_id) # Set data func = gl.glVertexAttribPointer args = size, gtype, gl.GL_FALSE, stride, offset self._attributes[name] = vbo.handle, handle, func, args def _pre_draw(self): self.activate() # Activate textures for tex_target, tex_handle, unit in self._samplers.values(): gl.glActiveTexture(gl.GL_TEXTURE0 + unit) gl.glBindTexture(tex_target, tex_handle) # Activate attributes for vbo_handle, attr_handle, func, args in self._attributes.values(): if vbo_handle: gl.glBindBuffer(gl.GL_ARRAY_BUFFER, vbo_handle) gl.glEnableVertexAttribArray(attr_handle) func(attr_handle, *args) else: gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0) gl.glDisableVertexAttribArray(attr_handle) func(attr_handle, *args) # Validate. We need to validate after textures units get assigned if not self._validated: self._validated = True self._validate() def _validate(self): # Validate ourselves if self._unset_variables: logger.warning('Program has unset variables: %r' % self._unset_variables) # Validate via OpenGL gl.glValidateProgram(self._handle) if not gl.glGetProgramParameter(self._handle, gl.GL_VALIDATE_STATUS): raise RuntimeError('Program validation error:\n%s' % gl.glGetProgramInfoLog(self._handle)) def _post_draw(self): # No need to deactivate each texture/buffer, just set to 0 gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0) gl.glBindTexture(gl.GL_TEXTURE_2D, 0) if USE_TEX_3D: gl.glBindTexture(GL_TEXTURE_3D, 0) gl.glBindTexture(GL_TEXTURE_1D, 0) # Deactivate program - should not be necessary. In single-program # apps it would not even make sense. # self.deactivate() def draw(self, mode, selection): """Draw program in given mode, with given selection (IndexBuffer or first, count). """ if not self._linked: raise RuntimeError('Cannot draw program if code has not been set') # Init gl.check_error('Check before draw') try: mode = as_enum(mode) except ValueError: if mode == 'lines_adjacency' or mode == 'line_strip_adjacency': raise RuntimeError(gl.current_backend.__name__ + " backend does not support lines_adjacency" " and line_strip_adjacency primitives." 
" Try gloo.gl.use_gl('gl+').") raise # Draw if len(selection) == 3: # Selection based on indices id_, gtype, count = selection if count: self._pre_draw() ibuf = self._parser.get_object(id_) ibuf.activate() gl.glDrawElements(mode, count, as_enum(gtype), None) ibuf.deactivate() else: # Selection based on start and count first, count = selection if count: self._pre_draw() gl.glDrawArrays(mode, first, count) # Wrap up gl.check_error('Check after draw') self._post_draw() class GlirBuffer(GlirObject): _target = None _usage = gl.GL_DYNAMIC_DRAW # STATIC_DRAW, STREAM_DRAW or DYNAMIC_DRAW def create(self): self._handle = gl.glCreateBuffer() self._buffer_size = 0 self._bufferSubDataOk = False def delete(self): gl.glDeleteBuffer(self._handle) def activate(self): gl.glBindBuffer(self._target, self._handle) def deactivate(self): gl.glBindBuffer(self._target, 0) def set_size(self, nbytes): # in bytes if nbytes != self._buffer_size: self.activate() gl.glBufferData(self._target, nbytes, self._usage) self._buffer_size = nbytes def set_data(self, offset, data): self.activate() nbytes = data.nbytes # Determine whether to check errors to try handling the ATI bug check_ati_bug = ((not self._bufferSubDataOk) and (gl.current_backend.__name__.split(".")[-1] == "gl2") and sys.platform.startswith('win')) # flush any pending errors if check_ati_bug: gl.check_error('periodic check') try: gl.glBufferSubData(self._target, offset, data) if check_ati_bug: gl.check_error('glBufferSubData') self._bufferSubDataOk = True # glBufferSubData seems to work except Exception: # This might be due to a driver error (seen on ATI), issue #64. # We try to detect
import unittest import numpy try: import scipy.sparse scipy_available = True except ImportError: scipy_available = False import cupy import cupy.sparse from cupy import testing def _make(xp, sp, dtype): data = xp.array([0, 1, 2, 3], dtype) row = xp.array([0, 0, 1, 2], 'i') col = xp.array([0, 1, 3, 2], 'i') # 0, 1, 0, 0 # 0, 0, 0, 2 # 0, 0, 3, 0 return sp.coo_matrix((data, (row, col)), shape=(3, 4)) def _make2(xp, sp, dtype): data = xp.array([1, 2, 3, 4], dtype) row = xp.array([0, 1, 1, 2], 'i') col = xp.array([2, 1, 2, 2], 'i') # 0, 0, 1, 0 # 0, 2, 3, 0 # 0, 0, 4, 0 return sp.coo_matrix((data, (row, col)), shape=(3, 4)) def _make3(xp, sp, dtype): data = xp.array([1, 2, 3, 4, 5], dtype) row = xp.array([0, 1, 1, 3, 3], 'i') col = xp.array([0, 2, 1, 0, 2], 'i') # 1, 0, 0 # 0, 3, 2 # 0, 0, 0 # 4, 0, 5 return sp.coo_matrix((data, (row, col)), shape=(4, 3)) def _make_unordered(xp, sp, dtype): data = xp.array([1, 4, 3, 2], dtype) row = xp.array([0, 2, 1, 0], 'i') col = xp.array([0, 2, 3, 1], 'i') # 1, 2, 0, 0 # 0, 0, 0, 3 # 0, 0, 4, 0 return sp.coo_matrix((data, (row, col)), shape=(3, 4)) def _make_duplicate(xp, sp, dtype): data = xp.array([0, 1, 2, 3, 4, 5], dtype) row = xp.array([1, 1, 1, 1, 0, 1], 'i') col = xp.array([0, 0, 2, 0, 0, 2], 'i') # 4, 0, 0, 0 # 4, 0, 7, 0 # 0, 0, 0, 0 return sp.coo_matrix((data, (row, col)), shape=(3, 4)) def _make_empty(xp, sp, dtype): data = xp.array([], dtype) row = xp.array([], 'i') col = xp.array([], 'i') return sp.coo_matrix((data, (row, col)), shape=(3, 4)) def _make_square(xp, sp, dtype): data = xp.array([0, 1, 2, 3], dtype) row = xp.array([0, 0, 1, 2], 'i') col = xp.array([0, 2, 0, 2], 'i') # 0, 1, 0 # 2, 0, 0 # 0, 0, 3 return sp.coo_matrix((data, (row, col)), shape=(3, 3)) def _make_shape(xp, sp, dtype): return sp.coo_matrix((3, 4)) @testing.parameterize(*testing.product({ 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128], })) class TestCooMatrix(unittest.TestCase): def setUp(self): self.m = _make(cupy, cupy.sparse, self.dtype) def test_dtype(self): self.assertEqual(self.m.dtype, self.dtype) def test_data(self): self.assertEqual(self.m.data.dtype, self.dtype) testing.assert_array_equal( self.m.data, cupy.array([0, 1, 2, 3], self.dtype)) def test_row(self): self.assertEqual(self.m.row.dtype, numpy.int32) testing.assert_array_equal( self.m.row, cupy.array([0, 0, 1, 2], self.dtype)) def test_col(self): self.assertEqual(self.m.col.dtype, numpy.int32) testing.assert_array_equal( self.m.col, cupy.array([0, 1, 3, 2], self.dtype)) def test_init_copy(self): n = cupy.sparse.coo_matrix(self.m) self.assertIsNot(n, self.m) cupy.testing.assert_array_equal(n.toarray(), self.m.toarray()) def test_init_copy_other_sparse(self): n = cupy.sparse.coo_matrix(self.m.tocsr()) cupy.testing.assert_array_equal(n.toarray(), self.m.toarray()) @unittest.skipUnless(scipy_available, 'requires scipy') def test_init_copy_scipy_sparse(self): m = _make(numpy, scipy.sparse, self.dtype) n = cupy.sparse.coo_matrix(m) self.assertIsInstance(n.data, cupy.ndarray) self.assertIsInstance(n.row, cupy.ndarray) self.assertIsInstance(n.col, cupy.ndarray) cupy.testing.assert_array_equal(n.data, m.data) cupy.testing.assert_array_equal(n.row, m.row) cupy.testing.assert_array_equal(n.col, m.col) self.assertEqual(n.shape, m.shape) @unittest.skipUnless(scipy_available, 'requires scipy') def test_init_copy_other_scipy_sparse(self): m = _make(numpy, scipy.sparse, self.dtype) n = cupy.sparse.coo_matrix(m.tocsc()) self.assertIsInstance(n.data, cupy.ndarray) self.assertIsInstance(n.row, 
cupy.ndarray) self.assertIsInstance(n.col, cupy.ndarray) cupy.testing.assert_array_equal(n.data, m.data) cupy.testing.assert_array_equal(n.row, m.row) cupy.testing.assert_array_equal(n.col, m.col) self.assertEqual(n.shape, m.shape) def test_shape(self): self.assertEqual(self.m.shape, (3, 4)) def test_ndim(self): self.assertEqual(self.m.ndim, 2) def test_nnz(self): self.assertEqual(self.m.nnz, 4) def test_has_canonical_format(self): self.assertFalse(self.m.has_canonical_format) @unittest.skipUnless(scipy_available, 'requires scipy') def test_get(self): m = self.m.get() self.assertIsInstance(m, scipy.sparse.coo_matrix) expect = [ [0, 1, 0, 0], [0, 0, 0, 2], [0, 0, 3, 0] ] numpy.testing.assert_allclose(m.toarray(), expect) @unittest.skipUnless(scipy_available, 'requires scipy') def test_str(self): if numpy.dtype(self.dtype).kind == 'f': expect = ''' (0, 0)\t0.0 (0, 1)\t1.0 (1, 3)\t2.0 (2, 2)\t3.0''' elif numpy.dtype(self.dtype).kind == 'c': expect = ''' (0, 0)\t0j (0, 1)\t(1+0j) (1, 3)\t(2+0j) (2, 2)\t(3+0j)''' self.assertEqual(str(self.m), expect) def test_toarray(self): m = self.m.toarray() expect = [ [0, 1, 0, 0], [0, 0, 0, 2], [0, 0, 3, 0] ] cupy.testing.assert_allclose(m, expect) @testing.parameterize(*testing.product({ 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128], })) @unittest.skipUnless(scipy_available, 'requires scipy') class TestCooMatrixInit(unittest.TestCase): def setUp(self): self.shape = (3, 4) def data(self, xp): return xp.array([0, 1, 2, 3], self.dtype) def row(self, xp): return xp.array([0, 0, 1, 2], 'i') def col(self, xp): return xp.array([0, 1, 3, 2], 'i') @testing.numpy_cupy_equal(sp_name='sp') def test_shape_none(self, xp, sp): x = sp.coo_matrix( (self.data(xp), (self.row(xp), self.col(xp))), shape=None) self.assertEqual(x.shape, (3, 4)) @testing.numpy_cupy_equal(sp_name='sp') def test_dtype(self, xp, sp): data = self.data(xp).real.astype('i') x = sp.coo_matrix( (data, (self.row(xp), self.col(xp))), dtype=self.dtype) self.assertEqual(x.dtype, self.dtype) @testing.numpy_cupy_equal(sp_name='sp') def test_copy_true(self, xp, sp): data = self.data(xp) row = self.row(xp) col = self.col(xp) x = sp.coo_matrix((data, (row, col)), copy=True) self.assertIsNot(data, x.data) self.assertIsNot(row, x.row) self.assertIsNot(col, x.col) @testing.numpy_cupy_raises(sp_name='sp') def test_invalid_format(self, xp, sp): sp.coo_matrix( (self.data(xp), self.row(xp)), shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_shape_invalid(self, xp, sp): sp.coo_matrix( (self.data(xp), (self.row(xp), self.col(xp))), shape=(2,)) def test_data_invalid(self): with self.assertRaises(ValueError): cupy.sparse.coo_matrix( ('invalid', (self.row(cupy), self.col(cupy))), shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_data_invalid_ndim(self, xp, sp): sp.coo_matrix( (self.data(xp)[None], (self.row(xp), self.col(xp))), shape=self.shape) def test_row_invalid(self): with self.assertRaises(ValueError): cupy.sparse.coo_matrix( (self.data(cupy), ('invalid', self.col(cupy))), shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_row_invalid_ndim(self, xp, sp): sp.coo_matrix( (self.data(xp), (self.row(xp)[None], self.col(xp))), shape=self.shape) def test_col_invalid(self): with self.assertRaises(ValueError): cupy.sparse.coo_matrix( (self.data(cupy), (self.row(cupy), 'invalid')), shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_col_invalid_ndim(self, xp, sp): sp.coo_matrix( (self.data(xp), (self.row(xp), self.col(xp)[None])), 
shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_data_different_length(self, xp, sp): data = xp.arange(5, dtype=self.dtype) sp.coo_matrix( (data(xp), (self.row(xp), self.col(xp))), shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_row_different_length(self, xp, sp): row = xp.arange(5, dtype=self.dtype) sp.coo_matrix( (self.data(xp), (row(xp), self.col(xp))), shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_col_different_length(self, xp, sp): col = xp.arange(5, dtype=self.dtype) sp.coo_matrix( (self.data(xp), (self.row(xp), col(xp))), shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_fail_to_infer_shape(self, xp, sp): data = xp.array([], dtype=self.dtype) row = xp.array([], dtype='i') col = xp.array([], dtype='i') sp.coo_matrix((data, (row, col)), shape=None) @testing.numpy_cupy_raises(sp_name='sp') def test_row_too_large(self, xp, sp): row = xp.array([0, 0, 1, 3], 'i') sp.coo_matrix( (self.data(xp), (row, self.col(xp))), shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_row_too_small(self, xp, sp): row = xp.array([0, -1, 1, 2], 'i') sp.coo_matrix( (self.data(xp), (row, self.col(xp))), shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_col_too_large(self, xp, sp): col = xp.array([0, 1, 4, 2], 'i') sp.coo_matrix( (self.data(xp), (self.row(xp), col)), shape=self.shape) @testing.numpy_cupy_raises(sp_name='sp') def test_col_too_small(self, xp, sp): col = xp.array([0, -1, 3, 2], 'i') sp.coo_matrix( (self.data(xp), (self.row(xp), col)), shape=self.shape) def test_unsupported_dtype(self): with self.assertRaises(ValueError): cupy.sparse.coo_matrix( (self.data(cupy), (self.row(cupy), self.col(cupy))), shape=self.shape, dtype='i') @testing.parameterize(*testing.product({ 'make_method': [ '_make', '_make_unordered', '_make_empty', '_make_duplicate', '_make_shape'], 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128], })) @unittest.skipUnless(scipy_available, 'requires scipy') class TestCooMatrixScipyComparison(unittest.TestCase): @property def make(self): return globals()[self.make_method] @testing.numpy_cupy_equal(sp_name='sp') def test_dtype(self, xp, sp): m = self.make(xp, sp, self.dtype) return m.dtype @testing.numpy_cupy_equal(sp_name='sp') def test_nnz(self, xp, sp): m = self.make(xp, sp, self.dtype) return m.getnnz() @testing.numpy_cupy_array_equal(sp_name='sp') def test_asfptype(self, xp, sp): m = _make(xp, sp, self.dtype) return m.asfptype().toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_toarray(self, xp, sp): m = self.make(xp, sp, self.dtype) return m.toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_A(self, xp, sp): m = self.make(xp, sp, self.dtype) return m.A @testing.numpy_cupy_allclose(sp_name='sp') def test_tocoo(self, xp, sp): m = _make(xp, sp, self.dtype) return m.tocoo().toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_tocoo_copy(self, xp, sp): m = _make(xp, sp, self.dtype) n = m.tocoo(copy=True) self.assertIsNot(m.data, n.data) self.assertIsNot(m.row, n.row) self.assertIsNot(m.col, n.col) return n.toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_tocsc(self, xp, sp): m = self.make(xp, sp, self.dtype) return m.tocsc().toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_tocsc_copy(self, xp, sp): m = _make(xp, sp, self.dtype) n = m.tocsc(copy=True) self.assertIsNot(m.data, n.data) return n.toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_tocsr(self, xp, sp): m = self.make(xp, sp, 
self.dtype) return m.tocsr().toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_tocsr_copy(self, xp, sp): m = _make(xp, sp, self.dtype) n = m.tocsr(copy=True) self.assertIsNot(m.data, n.data) return n.toarray() # dot @testing.numpy_cupy_allclose(sp_name='sp') def test_dot_scalar(self, xp, sp): m = _make(xp, sp, self.dtype) return m.dot(2.0).toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_dot_numpy_scalar(self, xp, sp): m = _make(xp, sp, self.dtype) return m.dot(numpy.dtype(self.dtype).type(2.0)).toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_dot_csr(self, xp, sp): m = _make(xp, sp, self.dtype) x = _make3(xp, sp, self.dtype) return m.dot(x).toarray() @testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError) def test_dot_csr_invalid_shape(self, xp, sp): m = _make(xp, sp, self.dtype) x = sp.csr_matrix((5, 3), dtype=self.dtype) m.dot(x) @testing.numpy_cupy_allclose(sp_name='sp') def test_dot_csc(self, xp, sp): m = _make(xp, sp, self.dtype) x = _make3(xp, sp, self.dtype).tocsc() return m.dot(x).toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_dot_sparse(self, xp, sp): m = _make(xp, sp, self.dtype) x = _make3(xp, sp, self.dtype).tocoo() return m.dot(x).toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_dot_zero_dim(self, xp, sp): m = _make(xp, sp, self.dtype) x = xp.array(2, dtype=self.dtype) return m.dot(x).toarray() @testing.numpy_cupy_allclose(sp_name='sp') def test_dot_dense_vector(self, xp, sp): m = _make(xp, sp, self.dtype) x = xp.arange(4).astype(self.dtype) return m.dot(x) @testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError) def test_dot_dense_vector_invalid_shape(self, xp, sp): m = _make(xp, sp, self.dtype) x = xp.arange(5).astype(self.dtype) m.dot(x) @testing.numpy_cupy_allclose(sp_name='sp') def test_dot_dense_matrix(self, xp, sp): m = _make(xp, sp, self.dtype) x = xp.arange(8).reshape(4, 2).astype(self.dtype) return m.dot(x) @testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError) def test_dot_dense_matrix_invalid_shape(self, xp, sp): m = _make(xp, sp, self.dtype) x = xp.arange(10).reshape(5, 2).astype(self.dtype) m.dot(x) @testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError) def test_dot_dense_ndim3(self, xp, sp): m = _make(xp, sp, self.dtype) x = xp.arange(24).reshape(4, 2, 3).astype(self.dtype) m.dot(x) @testing.numpy_cupy_raises(sp_name='sp') def test_dot_unsupported(self, xp, sp): m = _make(xp, sp, self.dtype) m.dot(None) # __add__ @testing.numpy_cupy_allclose(sp_name='sp') def test_add_zero(self, xp, sp): m = _make(xp, sp, self.dtype) return (m + 0).toarray() @testing.numpy_cupy_raises(sp_name='sp') def test_add_scalar(self, xp, sp): m = _make(xp, sp, self.dtype) m + 1 @testing.numpy_cupy_allclose(sp_name='sp') def test_add_csr(self, xp, sp): m = _make(xp, sp, self.dtype) n = _make2(xp, sp, self.dtype) return (m + n).toarray()
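# --- Illustrative sketch (not part of the test suite above): construct the same
# 3x4 COO matrix that the fixtures describe and densify it. This assumes the
# `cupy.sparse.coo_matrix` constructor that the tests exercise; on newer CuPy
# releases the equivalent class lives under `cupyx.scipy.sparse`.
import cupy
import numpy

_data = cupy.array([0, 1, 2, 3], dtype=numpy.float32)
_row = cupy.array([0, 0, 1, 2], dtype='i')
_col = cupy.array([0, 1, 3, 2], dtype='i')
_m = cupy.sparse.coo_matrix((_data, (_row, _col)), shape=(3, 4))
# toarray() should reproduce the `expect` layout used in the tests:
# [[0, 1, 0, 0],
#  [0, 0, 0, 2],
#  [0, 0, 3, 0]]
_dense = _m.toarray()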
<reponame>petebachant/Nortek-Python # contains C-style struct definitions and associated methods for various Nortek instruments # The base class is "NortekDataStructure" which is itself a subclass of ctypes.Structure # Individual data structures (e.g. velocity header) are a subclass of NortekDataStructure # They will inherit the calculateChecksum method from NortekDataStructure and need to define # # from ctypes import * import UserDict import struct import re import numpy import pdb from nortek import arrays as NortekDataArrays import datetime import pylab import logging moduleLogger = logging.getLogger( "Nortek." + __name__ ) class BCDTimeWord( Structure ): _fields_ = [ ( "minute", c_ubyte ), ( "second", c_ubyte ), ( "hour", c_ubyte ), ( "year", c_ubyte ), ( "month", c_ubyte ), ( "day", c_ubyte ) ] class Header( UserDict.UserDict ): def __init__( self, binaryDataString ): UserDict.UserDict.__init__( self ) self.binaryData = binaryDataString self.length = len( self.binaryData ) - 2 self.calculateChecksum() if self.checksum: self.interpretBinaryData() def calculateChecksum( self ): if self.length >= 4: reportedChecksum = struct.unpack( '<H', self.binaryData[ -2: ] ) checksumFormatString = '<' + 'h' * ( self.length / 2 ) if struct.calcsize( checksumFormatString ) == self.length: calculatedChecksum = ( int( 'b58c', base = 16 ) + sum( struct.unpack( checksumFormatString, self.binaryData[ 0:-2 ] ) ) ) % 65536 else: self.checksum = False if calculatedChecksum != reportedChecksum[ 0 ]: self.checksum = False else: self.checksum = True else: self.checksum = False def interpretBinaryData( self, instrumentType = None ): if self.binaryData[ 0:2 ] == '\xa5\x05': # Hardware Configuration syncByte, \ dataStructureID, \ sizeInWords, \ self[ 'serialNumber' ], \ boardConfigurationBit0, \ boardConfigurationBit1, \ self[ 'systemFrequency' ], \ self[ 'picVersion' ], \ self[ 'hardwareRevision' ], \ self[ 'recorderSize' ], \ hardwareStatus, \ spareWords, \ self[ 'firmwareVersion' ] = \ struct.unpack( '<ccH14sBBHHHHH12s4s', self.binaryData[ 0:-2 ] ) self[ 'serialNumber' ] = self[ 'serialNumber' ].rstrip() if ( re.search( 'VNO', self[ 'serialNumber' ] ) ): instrumentType = 'Vectrino' elif ( re.search( 'VEC', self[ 'serialNumber' ] ) ): instrumentType = 'Vector' elif ( re.search( 'AQD', self[ 'serialNumber' ] ) ): #if ( re.search( 'HR', self[ 'firmwareVersion' ] ) ): if ( float( self[ 'firmwareVersion' ] ) < 3.3 ): instrumentType = 'HR Profiler' else: instrumentType = 'Aquadopp Profiler' elif ( re.search( 'WPR', self[ 'hardwareConfiguration' ][ 'serialNumber' ] ) ): instrumentType = 'AWAC' else: instrumentType = 'unknown' if instrumentType is not 'Vectrino': if boardConfigurationBit0: # if True, there's a recorder self[ 'recorderInstalled' ] = True else: self[ 'recorderInstalled' ] = False self[ 'recorderSize' ] = 0 if boardConfigurationBit1: # if True, there's a compass self[ 'compassInstalled' ] = True else: self[ 'compassInstalled' ] = False return instrumentType elif self.binaryData[ 0:2 ] == '\xa5\x04': # head configuration syncByte, \ dataStructureID, \ sizeInWords, \ headConfiguration, \ self[ 'frequency' ], \ self[ 'headType' ], \ self[ 'serialNumber' ], \ systemData1, \ tempT, \ systemData2, \ distanceToSampleVolume, \ spareWords, \ self[ 'numberOfBeams' ] = \ struct.unpack( '<ccHHHH12s8s32s136sH20sH', self.binaryData[ 0:-2 ] ) if instrumentType is 'Vectrino': tempT = struct.unpack( '<16h', tempT ) self[ 'transformationMatrix' ] = \ numpy.matrix( numpy.reshape( tempT, ( 4, 4 ) ) ) / 4096.0 self[ 
'distanceToSampleVolume' ] = \ round( 10 * 0.5 * ( ( 1.5 * distanceToSampleVolume )**2 - 622.98 ) / ( 1.5 * distanceToSampleVolume - 5.7 ) ) / 10 elif self.binaryData[ 0:2 ] == '\xa5\x00': # User Configuration syncByte, \ dataStructureID, \ sizeInWords, \ self[ 'T1' ], \ self[ 'T2' ], \ self[ 'T3' ], \ self[ 'T4' ], \ self[ 'T5' ], \ self[ 'numberOfPings' ], \ self[ 'averageInterval' ], \ self[ 'numberOfBeams' ], \ timingControlRegister, \ powerControlRegister, \ self[ 'A1' ], \ self[ 'B0' ], \ self[ 'B1' ], \ self[ 'compassUpdateRate' ], \ coordinateSystem, \ self[ 'numberOfCells' ], \ cellSize, \ self[ 'measurementInterval' ], \ self[ 'deploymentName' ], \ self[ 'recorderWrapMode' ], \ self[ 'deploymentStartTime' ], \ self[ 'diagnosticMeasurement_sampleInterval' ], \ modeWord, \ self[ 'soundSpeedAdjustmentFactor' ], \ self[ 'dianosticMeasurement_numberOfSamples' ], \ self[ 'diagnosticMeasurementnumberOfBeamsOrCellNumber' ], \ self[ 'diagnosticMeasurement_NumberOfPings' ], \ modeTestWord, \ analogInputAddress, \ hVersion, \ spareWords, \ velocityAdjustmentTable, \ self[ 'comments' ], \ self[ 'waveMeasurement_mode' ], \ self[ 'waveMeasurement_waveCellPositionPercent' ], \ self[ 'wave_T1' ], \ self[ 'wave_T2' ], \ self[ 'wave_T3' ], \ self[ 'wave_numberOfSamples' ], \ self[ 'A1_2' ], \ self[ 'B0_2' ], \ self[ 'B1_2' ], \ spareWords2, \ self[ 'analogOutputScaleFactor' ], \ self[ 'ambiguityResolutionCorrelationThreshold' ], \ spareWords3, \ self[ 'transmitPulseLengthSecondLag_counts' ], \ spareWords3, \ self[ 'stageMatchFilterConstants' ] = \ struct.unpack( '<cc' + 'H' * 19 + '6sH6sL' + 'H' * 9 + '180s' * 2 + 'H' * 14 + '30s16s', self.binaryData[ 0:-2 ] ) timingControlRegister = '{:016b}'.format( timingControlRegister )[ ::-1 ] powerControlRegister = '{:016b}'.format( powerControlRegister )[ ::-1 ] modeWord = '{:016b}'.format( modeWord )[ ::-1 ] if timingControlRegister[ 5:7 ] == '00' and powerControlRegister[ 5:7 ] == '00': self[ 'powerLevel' ] = 'High' elif timingControlRegister[ 5:7 ] == '10' and powerControlRegister[ 5:7 ] == '10': self[ 'powerLevel' ] = 'High-' elif timingControlRegister[ 5:7 ] == '01' and powerControlRegister[ 5:7 ] == '01': self[ 'powerLevel' ] = 'LOW+' elif timingControlRegister[ 5:7 ] == '11' and powerControlRegister[ 5:7 ] == '11': self[ 'powerLevel' ] = 'LOW' if timingControlRegister[ 1 ] is '1': self[ 'sampleMode' ] = 'burst' else: self[ 'sampleMode' ] = 'continuous' if coordinateSystem == 0: self[ 'coordinateSystem' ] = 'ENU' elif coordinateSystem == 1: self[ 'coordinateSystem' ] = 'XYZ' else: self[ 'coordinateSystem' ] = 'Beam' if instrumentType is 'Vectrino': self[ 'sampleRate' ] = round( 50000.0 / self[ 'averageInterval' ] ) del self[ 'averageInterval' ] elif instrumentType is 'Vector': self[ 'sampleVolumeSize' ] = cellSize self[ 'sampleRate' ] = 512. / self[ 'averageInterval' ] elif instrumentType is 'HR Profiler': self[ 'sampleRate' ] = 512. / self[ 'T5' ] if instrumentType is not 'Vectrino': self[ 'cellSize' ] = cellSize else: self[ 'sampleVolumeSize' ] = cellSize if instrumentType is 'Vector' or 'HR Profiler': if modeWord[ 4 ] is '1': self[ 'velocityScaling' ] = 0.1 # mm/s else: self[ 'velocityScaling' ] = 1 # mm/s formattedSoftwareVersion = str( hVersion/10000 ) + "." + str( (hVersion % 10000)/100 ) if str(hVersion % 100): formattedSoftwareVersion + "." 
+ str(hVersion % 100) self[ 'softwareVersion' ] = formattedSoftwareVersion class NortekBinaryDataStructure( Structure ): _structureStart = 0 _structureStop = 0 def calculateChecksum( self, openDataFile ): originalPosition = openDataFile.tell() openDataFile.seek( originalPosition - self._sizeInBytes ) checksumDataType = c_short * ( self._sizeInBytes / 2 - 1 ) checksumData = checksumDataType() openDataFile.readinto( checksumData ) openDataFile.seek( originalPosition ) calculatedChecksum = int( 'b58c', base = 16 ) for aShort in checksumData: calculatedChecksum += aShort self.calculatedChecksum = calculatedChecksum % 65536 if self.checksum == self.calculatedChecksum: self.checksumResult = True return True else: self.checksumResult = False return False def incrementCounters( self ): pass def resetCounters( self ): pass def allocateDataArrays( self, anInstrument ): pass def moveIntoDataArrays( self, anInstrument ): pass class VectrinoVelocityData_binary( NortekBinaryDataStructure ): _fields_ = [ ( "status", c_char ), ( "count", c_ubyte ), ( "velocity", c_short * 4 ), ( "amplitude", c_ubyte * 4 ), ( "correlation", c_ubyte * 4 ), ( "checksum", c_ushort ) ] _sizeInBytes = 22 ensembleCounter = 0 ensembleCycleCounter = 0 def incrementCounters( self ): if self.ensembleCounter > 1 and self.count == 0: self.ensembleCycleCounter += 1 self.ensembleCounter = self.ensembleCycleCounter * 255 + self.count + self.ensembleCycleCounter # moduleLogger.info( 'Counter is {} and ensemble is {}'.format( # self.count, # self.ensembleCounter ) ) #self.ensembleCounter += 1 #while self.ensembleCounter != self.ensembleCycleCounter * 255 + self.count: # self.ensembleCounter += 1 def resetCounters( self ): self.ensembleCounter = 0 self.ensembleCycleCounter = 0 def allocateDataArrays( self, vectrinoInstrument ): samepleRate = vectrinoInstrument[ 'userConfiguration' ][ 'sampleRate' ] vectrinoInstrument[ 'velocity' ] = NortekDataArrays.VelocityDataArray( samepleRate, shape = ( 1, self.ensembleCounter, 4 ) ) vectrinoInstrument[ 'amplitude' ] = NortekDataArrays.GenericDataArray( samepleRate, shape = ( 1, self.ensembleCounter, 4 ) ) vectrinoInstrument[ 'snr' ] = NortekDataArrays.GenericDataArray( samepleRate, shape = ( 1, self.ensembleCounter, 4 ) ) vectrinoInstrument[ 'correlation' ] = NortekDataArrays.GenericDataArray( samepleRate, shape = ( 1, self.ensembleCounter, 4 ) ) vectrinoInstrument[ 'ensemble' ] = numpy.arange( 0, self.ensembleCounter, 1 ) self.resetCounters() def moveIntoDataArrays( self, vectrinoInstrument ): try: vectrinoInstrument[ 'velocity' ][ 'data' ][ 0, self.ensembleCounter, : ] = self.velocity[ 0:4 ] vectrinoInstrument[ 'amplitude' ][ 'data' ][ 0, self.ensembleCounter, : ] = self.amplitude[ 0:4 ] vectrinoInstrument[ 'correlation' ][ 'data' ][ 0, self.ensembleCounter, : ] = self.correlation[ 0:4 ] self.incrementCounters() except IndexError as error: pdb.set_trace() class VectrinoFileInfo_binary( NortekBinaryDataStructure ): _fields_ = [ ( "sizeInWords", c_short ), ( "field1", c_short ), ( "field2", c_short ), ( "field3", c_short ), ( "fileInfo", c_char * 14 ), ( "checksum", c_ushort ) ] _sizeInBytes = 26 fileInfoCounter = 0 class VectrinoVelocityHeader_binary( NortekBinaryDataStructure ): _fields_ = [ ( "sizeInWords", c_short ), ( "distance", c_short ), ( "distanceQuality", c_short ), ( "lag1", c_short ), ( "lag2", c_short ), ( "noise", c_ubyte * 4 ), ( "correlation", c_ubyte * 4 ), ( "temperature", c_short ), ( "speedOfSound", c_short ), ( "samplingVolumeAmplitude", c_ubyte * 4 ), ( "boundaryAmplitude", 
c_ubyte * 4 ),
                 ( "z0PlusLag1", c_ubyte * 4 ),
                 ( "z0PlusLag2", c_ubyte * 4 ),
                 ( "checksum", c_ushort ) ]
    _sizeInBytes = 42
    velocityHeaderCounter = 0

    def moveIntoDataArrays( self, vectrinoInstrument ):
        # store the per-burst header quantities under the 'velocityHeader'
        # sub-dictionary of the instrument object
        vectrinoInstrument[ 'velocityHeader' ] = {}
        vectrinoInstrument[ 'velocityHeader' ][ 'noise' ] = { 'amplitude': {}, 'correlation': {} }
        vectrinoInstrument[ 'velocityHeader' ][ 'distance' ] = {}
        vectrinoInstrument[ 'velocityHeader' ][ 'sampleVolumeAmplitude' ] = {}
        vectrinoInstrument[ 'velocityHeader' ][ 'boundaryAmplitude' ] = {}
        # Vectrino velocity data header
        vectrinoInstrument[ 'velocityHeader' ][ 'distance' ][ 'measurement' ] = self.distance
        vectrinoInstrument[ 'velocityHeader' ][ 'distance' ][ 'quality' ] = self.distanceQuality
        vectrinoInstrument[ 'velocityHeader' ][ 'lag1' ] = self.lag1 / 1e6
        vectrinoInstrument[ 'velocityHeader' ][ 'lag2' ] = self.lag2 / 1e6
        for beamNumber in range( 1, 5, 1 ):
            vectrinoInstrument[ 'velocityHeader' ][ 'noise' ][ 'amplitude' ][ beamNumber ] \
                = self.noise[ beamNumber - 1 ]
            vectrinoInstrument[ 'velocityHeader' ][ 'noise' ][ 'correlation' ][ beamNumber ] \
                = self.correlation[ beamNumber - 1 ]
            vectrinoInstrument[ 'velocityHeader' ][ 'sampleVolumeAmplitude' ][ beamNumber
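# --- Minimal standalone sketch (hypothetical helper, not one of the classes
# above) of the checksum rule they implement: 0xb58c plus the sum of the
# structure's signed 16-bit words, modulo 65536, compared against the trailing
# checksum word. `payload` is assumed to be the structure's bytes with the
# final 2-byte checksum removed.
import struct

def nortek_checksum(payload):
    n_words = len(payload) // 2
    words = struct.unpack('<{0}h'.format(n_words), payload[:n_words * 2])
    return (int('b58c', base=16) + sum(words)) % 65536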
""" Utilities for fitting stacked (drizzled) spectra """ from collections import OrderedDict from imp import reload import astropy.io.fits as pyfits import astropy.units as u import numpy as np from . import utils from .utils import GRISM_COLORS, GRISM_MAJOR, GRISM_LIMITS, DEFAULT_LINE_LIST from .fitting import GroupFitter def make_templates(grism='G141', return_lists=False, fsps_templates=False, line_list=DEFAULT_LINE_LIST): """Generate template savefile This script generates the template sets with the emission line complexes and with individual lines. Parameters ---------- grism : str Grism of interest, which defines what FWHM to use for the line templates. return_lists : bool Return the templates rather than saving them to a file Returns ------- t_complexes, t_lines : list If `return` then return two lists of templates. Otherwise, store them to a `~numpy` save file "templates_{fwhm}.npy". """ from .multifit import MultiBeam if grism == 'G141': # WFC3/IR fwhm = 1100 elif grism == 'G800L': # ACS/UVIS fwhm = 1400 elif grism == 'G280': # WFC3/UVIS fwhm = 1500 elif grism == 'GRISM': # WFIRST fwhm = 350 else: fwhm = 700 # G102 # Line complex templates t_complexes = utils.load_templates(fwhm=fwhm, line_complexes=True, fsps_templates=fsps_templates) # Individual lines # line_list = ['SIII', 'SII', 'Ha', 'OI-6302', 'OIII', 'Hb', # 'OIII-4363', 'Hg', 'Hd', 'NeIII', 'OII', 'MgII'] t_lines = utils.load_templates(fwhm=fwhm, line_complexes=False, full_line_list=line_list, fsps_templates=fsps_templates) if return_lists: return t_complexes, t_lines else: # Save them to a file np.save('templates_{0}.npy'.format(fwhm), [t_complexes, t_lines]) print('Wrote `templates_{0}.npy`'.format(fwhm)) class StackFitter(GroupFitter): def __init__(self, files='gnt_18197.stack.fits', group_name=None, sys_err=0.02, mask_min=0.1, fit_stacks=True, fcontam=1, PAs=None, extensions=None, min_ivar=0.01, overlap_threshold=3, verbose=True, eazyp=None, eazy_ix=0, MW_EBV=0., chi2_threshold=1.5, min_DoF=200): """Object for fitting stacked spectra. Parameters ---------- files : str or list of str Stack FITS filename. If a list is supplied, e.g., the product of a `~glob` command, then append all specified files. group_name : str Rootname to associate with the object. If none, then default to `files`. sys_err : float Minimum systematic error, interpreted as a fractional error. The adjusted variance is taken to be >>> var = var0 + (sys_err*flux)**2 mask_min : float Only fit 2D pixels where the flat-flambda model has pixel values greater than `mask_min` times the maximum of the model. fit_stacks : bool Fit the stacks of each grism combined from all available PAs. If False, then fit the PAs individually. fcontam : float Parameter to control weighting of contaminated pixels for `fit_stacks=False`. 
""" if isinstance(files, list): file=files[0] else: file=files self.files = [file] if group_name is not None: self.group_name = group_name else: self.group_name = file if verbose: print('Load file {0}'.format(file)) self.file = file self.hdulist = pyfits.open(file) self.min_ivar = min_ivar self.sys_err = sys_err self.fcontam = fcontam self.MW_EBV = MW_EBV self.h0 = self.hdulist[0].header.copy() #self.Ngrism = self.h0['NGRISM'] self.grisms = [] self.ext = [] for i in range(self.h0['NGRISM']): g = self.h0['GRISM{0:03d}'.format(i+1)] self.grisms.append(g) if fit_stacks: if extensions is not None: if g not in extensions: continue self.ext.append(g) else: ng = self.h0['N{0}'.format(g)] for j in range(ng): pa = self.h0['{0}{1:02d}'.format(g, j+1)] if PAs is not None: if pa not in PAs: continue ext = '{0},{1}'.format(g,pa) if extensions is not None: if ext not in extensions: continue self.ext.append(ext) self.N = len(self.ext) self.beams = [] pop = [] for i in range(self.N): E_i = StackedSpectrum(file=self.file, sys_err=sys_err, mask_min=mask_min, extver=self.ext[i], mask_threshold=-1, fcontam=fcontam, min_ivar=min_ivar, MW_EBV=MW_EBV) E_i.compute_model() if np.isfinite(E_i.kernel.sum()) & (E_i.DoF >= min_DoF): self.beams.append(E_i) else: pop.append(i) for i in pop[::-1]: self.N -= 1 p = self.ext.pop(i) # Get some parameters from the beams self.id = self.h0['ID'] self.ra = self.h0['RA'] self.dec = self.h0['DEC'] self.Asave = {} ## Photometry self.is_spec = 1 self.Nphot = 0 ## Parse the beam data self._parse_beams_list() if not fit_stacks: # self.mask_drizzle_overlaps(threshold=overlap_threshold, # verbose=verbose) if chi2_threshold > 0: orig_ext = [e for e in self.ext] fit_log, keep_dict, has_bad = self.check_for_bad_PAs(poly_order=3, chi2_threshold=chi2_threshold, fit_background=True, reinit=True, verbose=False) if has_bad & verbose: print('Found bad PA. New list: {0}'.format(keep_dict)) if verbose: print(' {0}'.format(' '.join(self.ext))) # Read multiple if isinstance(files, list): if len(files) > 1: for file in files[1:]: extra = StackFitter(files=file, sys_err=sys_err, mask_min=mask_min, fit_stacks=fit_stacks, fcontam=fcontam, pas=pas, extensions=extensions, min_ivar=min_ivar, overlap_threshold=overlap_threshold, eazyp=eazyp, eazy_ix=eazy_ix, chi2_threshold=chi2_threshold, verbose=verbose) self.extend(extra) self.idf = np.hstack([b.scif*0+ib for ib, b in enumerate(self.beams)]) self.idf = np.cast[int](self.idf) # if eazyp is not None: # self.eazyp = eazyp # # # TBD: do matching to eazyp.cat directly? 
# self.eazy_ix = eazy_ix # # ok_phot = (eazyp.efnu[eazy_ix,:] > 0) & (eazyp.fnu[eazy_ix,:] > eazyp.param['NOT_OBS_THRESHOLD']) & np.isfinite(eazyp.fnu[eazy_ix,:]) & np.isfinite(eazyp.efnu[eazy_ix,:]) # ok_phot = np.squeeze(ok_phot) # self.ok_phot = ok_phot # # self.Nphot = ok_phot.sum() # if self.Nphot > 0: # # # F-lambda photometry, 1e-19 erg/s/cm2/A # self.photom_eflam = (eazyp.efnu[eazy_ix,:]*eazyp.to_flam*eazyp.zp*eazyp.ext_corr/100.)[ok_phot] # self.photom_flam = (eazyp.fnu[eazy_ix,:]*eazyp.to_flam*eazyp.zp*eazyp.ext_corr/100.)[ok_phot] # self.photom_lc = eazyp.lc[ok_phot] # # self.scif = np.hstack((self.scif, self.photom_flam)) # self.ivarf = np.hstack((self.ivarf, 1/self.photom_eflam**2)) # self.sivarf = np.hstack((self.sivarf, 1/self.photom_eflam)) # self.wavef = np.hstack((self.wavef, self.photom_lc)) # # self.weightf = np.hstack((self.weightf, np.ones(self.Nphot))) # self.fit_mask = np.hstack((self.fit_mask, np.ones(self.Nphot, dtype=bool))) # self.DoF += self.Nphot # self.phot_scale = np.array([10.]) def _parse_beams_list(self): """ """ # Parse from self.beams list self.N = len(self.beams) self.ext = [E.extver for E in self.beams] self.Ngrism = OrderedDict() for beam in self.beams: if beam.grism in self.Ngrism: self.Ngrism[beam.grism] += 1 else: self.Ngrism[beam.grism] = 1 # Make "PA" attribute self.PA = OrderedDict() for g in self.Ngrism: self.PA[g] = OrderedDict() for i in range(self.N): grism = self.ext[i].split(',')[0] if ',' in self.ext[i]: PA = float(self.ext[i].split(',')[1]) else: PA = 0 if PA in self.PA[grism]: self.PA[grism][PA].append(i) else: self.PA[grism][PA] = [i] self.grisms = list(self.PA.keys()) self.Ntot = np.sum([E.size for E in self.beams]) self.scif = np.hstack([E.scif for E in self.beams]) self.ivarf = np.hstack([E.ivarf for E in self.beams]) self.wavef = np.hstack([E.wavef for E in self.beams]) self.weightf = np.hstack([E.weightf for E in self.beams]) #self.ivarf *= self.weightf self.sivarf = np.sqrt(self.ivarf) self.fit_mask = np.hstack([E.fit_mask for E in self.beams]) self.fit_mask &= self.ivarf > self.min_ivar*self.ivarf.max() # Dummy parameter. Implemented for MultiBeam self.sens_mask = 1. self.DoF = int((self.fit_mask*self.weightf).sum()) #self.Nmask = self.fit_mask.sum() self.Nmask = np.sum([E.fit_mask.sum() for E in self.beams]) self.slices = self._get_slices(masked=False) self.A_bg = self._init_background(masked=False) self.Asave = {} self._update_beam_mask() self.A_bgm = self._init_background(masked=True) self.flat_flam = np.hstack([E.flat_flam for E in self.beams]) self.initialize_masked_arrays() def extend(self, st): """ Append second StackFitter objects to `self`. 
""" self.beams.extend(st.beams) self._parse_beams_list() # self.grisms.extend(st.grisms) # self.grisms = list(np.unique(self.grisms)) # # self.Ngrism = {} # for grism in self.grisms: # self.Ngrism[grism] = 0 # # for beam in self.beams: # self.Ngrism[beam.grism] += 1 # # #self.Ngrism = len(self.grisms) # # self.N += st.N # self.ext.extend(st.ext) # self.files.extend(st.files) # # # Re-init # self.Ntot = np.sum([E.size for E in self.beams]) # self.scif = np.hstack([E.scif for E in self.beams]) # self.ivarf = np.hstack([E.ivarf for E in self.beams]) # self.wavef = np.hstack([E.wavef for E in self.beams]) # # self.weightf = np.hstack([E.weightf for E in self.beams]) # #self.ivarf *= self.weightf # # self.sivarf = np.sqrt(self.ivarf) # # self.fit_mask = np.hstack([E.fit_mask for E in self.beams]) # self.fit_mask &= self.ivarf > self.min_ivar*self.ivarf.max() # # self.slices = self._get_slices(masked=False) # self.A_bg = self._init_background(masked=False) # # self._update_beam_mask() # self.A_bgm = self._init_background(masked=True) # # self.Nmask = self.fit_mask.sum() # self.DoF = int((self.fit_mask*self.weightf).sum()) # # self.flat_flam = np.hstack([E.flat_flam for E in self.beams]) def check_for_bad_PAs(self, poly_order=1, chi2_threshold=1.5, fit_background=True, reinit=True, verbose=False): """ """ wave = np.linspace(2000,2.5e4,100) poly_templates = utils.polynomial_templates(wave, order=poly_order) fit_log = OrderedDict() keep_dict = OrderedDict() has_bad = False keep_beams = [] for g in self.PA: fit_log[g] = OrderedDict() keep_dict[g] = [] for pa in self.PA[g]: extensions = [self.ext[i] for i in self.PA[g][pa]] mb_i = StackFitter(self.file, fcontam=self.fcontam, sys_err=self.sys_err, extensions=extensions, fit_stacks=False, verbose=verbose, chi2_threshold=-1) try: chi2, _, _, _ = mb_i.xfit_at_z(z=0, templates=poly_templates, fit_background=fit_background) except: chi2 = 1e30 if False: p_i = mb_i.template_at_z(z=0, templates=poly_templates, fit_background=fit_background, fitter='lstsq', fwhm=1400, get_uncertainties=2) fit_log[g][pa] = {'chi2': chi2, 'DoF': mb_i.DoF, 'chi_nu': chi2/np.maximum(mb_i.DoF, 1)} min_chinu = 1e30 for pa in self.PA[g]: min_chinu = np.minimum(min_chinu,
"""Definitions of ground truth NSRTs for all environments.""" import itertools from typing import List, Sequence, Set, cast import numpy as np from predicators.src.envs import get_or_create_env from predicators.src.envs.behavior import BehaviorEnv from predicators.src.envs.behavior_options import grasp_obj_param_sampler, \ navigate_to_param_sampler, place_ontop_obj_pos_sampler from predicators.src.envs.doors import DoorsEnv from predicators.src.envs.painting import PaintingEnv from predicators.src.envs.pddl_env import _PDDLEnv from predicators.src.envs.playroom import PlayroomEnv from predicators.src.envs.repeated_nextto_painting import \ RepeatedNextToPaintingEnv from predicators.src.envs.satellites import SatellitesEnv from predicators.src.envs.tools import ToolsEnv from predicators.src.settings import CFG from predicators.src.structs import NSRT, Array, GroundAtom, LiftedAtom, \ Object, ParameterizedOption, Predicate, State, Type, Variable from predicators.src.utils import null_sampler def get_gt_nsrts(predicates: Set[Predicate], options: Set[ParameterizedOption]) -> Set[NSRT]: """Create ground truth NSRTs for an env.""" if CFG.env in ("cover", "cover_hierarchical_types", "cover_typed_options", "cover_regrasp", "cover_multistep_options", "pybullet_cover"): nsrts = _get_cover_gt_nsrts() elif CFG.env == "cluttered_table": nsrts = _get_cluttered_table_gt_nsrts() elif CFG.env == "cluttered_table_place": nsrts = _get_cluttered_table_gt_nsrts(with_place=True) elif CFG.env in ("blocks", "pybullet_blocks"): nsrts = _get_blocks_gt_nsrts() elif CFG.env == "behavior": nsrts = _get_behavior_gt_nsrts() # pragma: no cover elif CFG.env in ("painting", "repeated_nextto_painting"): nsrts = _get_painting_gt_nsrts() elif CFG.env == "tools": nsrts = _get_tools_gt_nsrts() elif CFG.env == "playroom": nsrts = _get_playroom_gt_nsrts() elif CFG.env == "repeated_nextto": nsrts = _get_repeated_nextto_gt_nsrts(CFG.env) elif CFG.env == "repeated_nextto_single_option": nsrts = _get_repeated_nextto_single_option_gt_nsrts() elif CFG.env == "screws": nsrts = _get_screws_gt_nsrts() elif CFG.env.startswith("pddl_"): nsrts = _get_pddl_env_gt_nsrts(CFG.env) elif CFG.env == "touch_point": nsrts = _get_touch_point_gt_nsrts() elif CFG.env == "stick_button": nsrts = _get_stick_button_gt_nsrts() elif CFG.env == "doors": nsrts = _get_doors_gt_nsrts() elif CFG.env == "coffee": nsrts = _get_coffee_gt_nsrts() elif CFG.env in ("satellites", "satellites_simple"): nsrts = _get_satellites_gt_nsrts() else: raise NotImplementedError("Ground truth NSRTs not implemented") # Filter out excluded predicates from NSRTs, and filter out NSRTs whose # options are excluded. 
final_nsrts = set() for nsrt in nsrts: if nsrt.option not in options: continue nsrt = nsrt.filter_predicates(predicates) final_nsrts.add(nsrt) return final_nsrts def _get_from_env_by_names(env_name: str, names: Sequence[str], env_attr: str) -> List: """Helper for loading types, predicates, and options by name.""" env = get_or_create_env(env_name) name_to_env_obj = {} for o in getattr(env, env_attr): name_to_env_obj[o.name] = o assert set(name_to_env_obj).issuperset(set(names)) return [name_to_env_obj[name] for name in names] def _get_types_by_names(env_name: str, names: Sequence[str]) -> List[Type]: """Load types from an env given their names.""" return _get_from_env_by_names(env_name, names, "types") def _get_predicates_by_names(env_name: str, names: Sequence[str]) -> List[Predicate]: """Load predicates from an env given their names.""" return _get_from_env_by_names(env_name, names, "predicates") def _get_options_by_names(env_name: str, names: Sequence[str]) -> List[ParameterizedOption]: """Load parameterized options from an env given their names.""" return _get_from_env_by_names(env_name, names, "options") def _get_cover_gt_nsrts() -> Set[NSRT]: """Create ground truth NSRTs for CoverEnv or environments that inherit from CoverEnv.""" # Types block_type, target_type, robot_type = _get_types_by_names( CFG.env, ["block", "target", "robot"]) # Objects block = Variable("?block", block_type) robot = Variable("?robot", robot_type) target = Variable("?target", target_type) # Predicates IsBlock, IsTarget, Covers, HandEmpty, Holding = \ _get_predicates_by_names(CFG.env, ["IsBlock", "IsTarget", "Covers", "HandEmpty", "Holding"]) # Options if CFG.env in ("cover", "pybullet_cover", "cover_hierarchical_types", "cover_regrasp"): PickPlace, = _get_options_by_names(CFG.env, ["PickPlace"]) elif CFG.env in ("cover_typed_options", "cover_multistep_options"): Pick, Place = _get_options_by_names(CFG.env, ["Pick", "Place"]) nsrts = set() # Pick parameters = [block] holding_predicate_args = [block] if CFG.env == "cover_multistep_options": parameters.append(robot) holding_predicate_args.append(robot) preconditions = {LiftedAtom(IsBlock, [block]), LiftedAtom(HandEmpty, [])} add_effects = {LiftedAtom(Holding, holding_predicate_args)} delete_effects = {LiftedAtom(HandEmpty, [])} if CFG.env in ("cover", "pybullet_cover", "cover_hierarchical_types", "cover_regrasp"): option = PickPlace option_vars = [] elif CFG.env == "cover_typed_options": option = Pick option_vars = [block] elif CFG.env == "cover_multistep_options": option = Pick option_vars = [block, robot] if CFG.env == "cover_multistep_options": def pick_sampler(state: State, goal: Set[GroundAtom], rng: np.random.Generator, objs: Sequence[Object]) -> Array: # The only things that change are the block's grasp, and the # robot's grip, holding, x, and y. assert len(objs) == 2 block, robot = objs assert block.is_instance(block_type) assert robot.is_instance(robot_type) bx, by = state.get(block, "x"), state.get(block, "y") rx, ry = state.get(robot, "x"), state.get(robot, "y") bw = state.get(block, "width") if CFG.cover_multistep_goal_conditioned_sampling: # Goal conditioned sampling currently assumes one goal. assert len(goal) == 1 goal_atom = next(iter(goal)) t = goal_atom.objects[1] tx, tw = state.get(t, "x"), state.get(t, "width") thr_found = False # target hand region # Loop over objects in state to find target hand region, # whose center should overlap with the target. 
for obj in state.data: if obj.type.name == "target_hand_region": tlb = state.get(obj, "lb") tub = state.get(obj, "ub") tm = (tlb + tub) / 2 # midpoint of hand region if tx - tw / 2 < tm < tx + tw / 2: thr_found = True break assert thr_found if CFG.cover_multistep_degenerate_oracle_samplers: desired_x = float(bx) elif CFG.cover_multistep_goal_conditioned_sampling: # Block position adjusted by target/ thr offset desired_x = bx + (tm - tx) else: desired_x = rng.uniform(bx - bw / 2, bx + bw / 2) # This option changes the grasp for the block from -1.0 to 1.0, so # the delta is 1.0 - (-1.0) = 2.0 block_param = [2.0] # The grip changes from -1.0 to 1.0. # The holding changes from -1.0 to 1.0. # x, y, grip, holding robot_param = [desired_x - rx, by - ry, 2.0, 2.0] param = block_param + robot_param return np.array(param, dtype=np.float32) else: def pick_sampler(state: State, goal: Set[GroundAtom], rng: np.random.Generator, objs: Sequence[Object]) -> Array: del goal # unused assert len(objs) == 1 b = objs[0] assert b.is_instance(block_type) if CFG.env == "cover_typed_options": lb = float(-state.get(b, "width") / 2) ub = float(state.get(b, "width") / 2) elif CFG.env in ("cover", "pybullet_cover", "cover_hierarchical_types", "cover_regrasp"): lb = float(state.get(b, "pose") - state.get(b, "width") / 2) lb = max(lb, 0.0) ub = float(state.get(b, "pose") + state.get(b, "width") / 2) ub = min(ub, 1.0) return np.array(rng.uniform(lb, ub, size=(1, )), dtype=np.float32) pick_nsrt = NSRT("Pick", parameters, preconditions, add_effects, delete_effects, set(), option, option_vars, pick_sampler) nsrts.add(pick_nsrt) # Place (to Cover) parameters = [block, target] holding_predicate_args = [block] if CFG.env == "cover_multistep_options": parameters = [block, robot, target] holding_predicate_args.append(robot) preconditions = { LiftedAtom(IsBlock, [block]), LiftedAtom(IsTarget, [target]), LiftedAtom(Holding, holding_predicate_args) } add_effects = { LiftedAtom(HandEmpty, []), LiftedAtom(Covers, [block, target]) } delete_effects = {LiftedAtom(Holding, holding_predicate_args)} if CFG.env == "cover_regrasp": Clear, = _get_predicates_by_names("cover_regrasp", ["Clear"]) preconditions.add(LiftedAtom(Clear, [target])) delete_effects.add(LiftedAtom(Clear, [target])) if CFG.env in ("cover", "pybullet_cover", "cover_hierarchical_types", "cover_regrasp"): option = PickPlace option_vars = [] elif CFG.env == "cover_typed_options": option = Place option_vars = [target] elif CFG.env == "cover_multistep_options": option = Place option_vars = [block, robot, target] if CFG.env == "cover_multistep_options": def place_sampler(state: State, goal: Set[GroundAtom], rng: np.random.Generator, objs: Sequence[Object]) -> Array: if CFG.cover_multistep_goal_conditioned_sampling: # Goal conditioned sampling currently assumes one goal. assert len(goal) == 1 goal_atom = next(iter(goal)) t = goal_atom.objects[1] tx, tw = state.get(t, "x"), state.get(t, "width") thr_found = False # target hand region # Loop over objects in state to find target hand region, # whose center should overlap with the target. 
for obj in state.data: if obj.type.name == "target_hand_region": lb = state.get(obj, "lb") ub = state.get(obj, "ub") m = (lb + ub) / 2 # midpoint of hand region if tx - tw / 2 < m < tx + tw / 2: thr_found = True break assert thr_found assert len(objs) == 3 block, robot, target = objs assert block.is_instance(block_type) assert robot.is_instance(robot_type) assert target.is_instance(target_type) rx = state.get(robot, "x") tx, tw = state.get(target, "x"), state.get(target, "width") if CFG.cover_multistep_degenerate_oracle_samplers: desired_x = float(tx) elif CFG.cover_multistep_goal_conditioned_sampling: desired_x = m # midpoint of hand region else: desired_x = rng.uniform(tx - tw / 2, tx + tw / 2) delta_x = desired_x - rx # This option changes the grasp for the block from 1.0 to -1.0, so # the delta is -1.0 - 1.0 = -2.0. # x, grasp block_param = [delta_x, -2.0] # The grip changes from 1.0 to -1.0. # The holding changes from 1.0 to -1.0. # x, grip, holding robot_param = [delta_x, -2.0, -2.0] param = block_param + robot_param return np.array(param, dtype=np.float32) else: def place_sampler(state: State, goal: Set[GroundAtom], rng: np.random.Generator, objs: Sequence[Object]) -> Array: del goal # unused assert len(objs) == 2 t = objs[-1] assert t.is_instance(target_type) lb = float(state.get(t, "pose") - state.get(t, "width") / 10) lb = max(lb, 0.0) ub = float(state.get(t, "pose") + state.get(t, "width") / 10) ub = min(ub, 1.0) return np.array(rng.uniform(lb, ub, size=(1, )), dtype=np.float32) place_nsrt = NSRT("Place", parameters, preconditions, add_effects, delete_effects, set(), option, option_vars, place_sampler) nsrts.add(place_nsrt) # Place (not on any target) if CFG.env == "cover_regrasp": parameters = [block] preconditions = { LiftedAtom(IsBlock, [block]), LiftedAtom(Holding, [block]) } add_effects = { LiftedAtom(HandEmpty, []), } delete_effects = {LiftedAtom(Holding, [block])} option
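# --- Standalone sketch of the simple (non-multistep) samplers defined above:
# both Pick and Place draw a parameter uniformly over an interval derived from
# an object's pose and width, clipped to the unit-width workspace. The names
# `pose`, `width`, and `shrink` stand in for the state features and constants
# read in those samplers.
import numpy as np

def _uniform_pose_sampler(pose, width, rng, shrink=2.0):
    # shrink=2.0 reproduces the Pick interval (pose +/- width/2); the Place
    # sampler above uses the narrower interval pose +/- width/10.
    lb = max(pose - width / shrink, 0.0)
    ub = min(pose + width / shrink, 1.0)
    return np.array(rng.uniform(lb, ub, size=(1,)), dtype=np.float32)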
<reponame>xiaofengxie128/Proteomic-Data-Manager from django.db import models from django.conf import settings from datetime import date import datetime import xmltodict from django_currentuser.db.models import CurrentUserField from django.contrib.contenttypes.models import ContentType from django.contrib.auth.models import User import pickle import os import subprocess import time import shutil from django.utils import timezone import json from os.path import exists import random import string from django.db.models.signals import post_save from django.core.files.uploadedfile import InMemoryUploadedFile from django.dispatch import receiver from django.contrib.contenttypes import fields class NoteFile(models.Model): notefile = models.FileField(upload_to=f"notefiles/{date.today().year}/ \ {date.today().month}/{date.today().day}", blank=True, null=True) class SsdStorage(models.Model): filelocation = models.FileField(upload_to="temp/", blank=True, null=True) class HdStorage(models.Model): filelocation = models.FileField(blank=True, null=True) class RemoteStorage(models.Model): filelocation = models.FileField(blank=True, null=True) class OfflineStorage(models.Model): filelocation = models.FileField(blank=True, null=True) class PklStorage(models.Model): filelocation = models.FileField(blank=True, null=True) class RawFile(models.Model): """This is the main class, used to describe a MS run """ run_name = models.TextField(max_length=100, blank=True, null=True) plot_label = models.TextField(max_length=100, blank=True, null=True) project_name = models.TextField(max_length=100, blank=True, null=True) run_desc = models.TextField(max_length=1000, blank=True, null=True) qc_tool = models.IntegerField(blank=True, null=True, default=0) qc_mbr = models.TextField(max_length=1000, blank=True, null=True) instrument_model = models.TextField(max_length=100, blank=True, null=True) instrument_sn = models.TextField(max_length=100, blank=True, null=True) creator = CurrentUserField() notes = models.TextField(max_length=1000, blank=True, null=True) note_file = models.ManyToManyField(NoteFile, blank=True) temp_data = models.BooleanField(default=False, null=True) sample_obj = models.IntegerField(blank=True, null=True, default=0) column_sn = models.TextField(max_length=100, blank=True, null=True) spe_sn = models.TextField(max_length=100, blank=True, null=True) rawfile = models.FileField(upload_to="temp/", blank=True, null=True) storage_option = models.IntegerField(blank=True, null=True, default=0) uploaded_at = models.DateTimeField(auto_now_add=True) acquisition_time = models.DateTimeField(blank=True, null=True) sample_type = models.TextField(max_length=100, blank=True, null=True) pklfile = models.FileField(null=True, blank=True,) file_size = models.DecimalField(default=0, max_digits=5, decimal_places=3, blank=True, null=True) content_extracted = models.BooleanField(default=False, null=True) content_type = models.ForeignKey(ContentType, on_delete=models.PROTECT, null=True, blank=True,) object_id = models.PositiveIntegerField(default=5) current_raw = fields.GenericForeignKey( "content_type", "object_id") ssd_storage = models.ForeignKey( "SsdStorage", on_delete=models.SET_NULL, null=True, blank=True, ) hd_storage = models.ForeignKey( "HdStorage", on_delete=models.SET_NULL, null=True, blank=True, ) remote_storage = models.ForeignKey( "RemoteStorage", on_delete=models.SET_NULL, null=True, blank=True, ) offline_storage = models.ForeignKey( "OfflineStorage", on_delete=models.SET_NULL, null=True, blank=True, ) pkl_storage = 
models.ForeignKey( "PklStorage", on_delete=models.SET_NULL, null=True, blank=True, ) qc_content_type = models.ForeignKey( ContentType, related_name="qc_content", on_delete=models.PROTECT, null=True, blank=True,) qc_object_id = models.PositiveIntegerField(default=1) current_qc = fields.GenericForeignKey( "qc_content_type", "qc_object_id") # TODO:remove pklfile field, replace with pkl_storage, should be very easy. @receiver(post_save, sender=RawFile, dispatch_uid="update and move the file") def update_raw(sender, instance, **kwargs): """Perform file convertion, meta info extraction when save a new record """ # TODO: add error handling if can"t convert file or read file if not instance.content_extracted: # for Thermo raw files if instance.rawfile.name.split(".")[-1] == "raw": rawfile_name = instance.rawfile.name rawfile_name_only = rawfile_name.split("/")[-1] try: """ have to do this batch approach as the following directly mono command approch (tried 5 hours) won't work result = subprocess. run(['mono',f'/home/rtklab/Documents/data_manager/ ThermoRawFileParser/ThermoRawFileParser.exe -i=/home/rtklab/Documents/data_manager/media/{rawfile_name} -m=0'], capture_output=True) """ batch_file = open("ThermoRawFileParser/run.sh", "w") batch_file.write(" #!/bin/bash \n") command_1 = "mono /home/rtklab/Documents/data_manager/"\ "ThermoRawFileParser/ThermoRawFileParser.exe "\ "-d=/home/rtklab/Documents/data_manager/media/temp"\ " -m=0 -f=1 -L=1,2" batch_file.write(command_1) batch_file.close() command_2 = "/home/rtklab/Documents/data_manager/"\ "ThermoRawFileParser/run.sh" result = subprocess.run( ["sh", command_2], capture_output=True) print(result) time.sleep(1) filename = os.path.join( "/home/rtklab/Documents/data_manager/media/temp/", rawfile_name_only[:-4]+"-metadata.json") f = open(filename,) data = json.load(f) f.close() os.remove(filename) RawFile.objects.filter(pk=instance.pk).update( acquisition_time=datetime.datetime.strptime( data["FileProperties"][2]["value"], "%m/%d/%Y %H:%M:%S")) RawFile.objects.filter(pk=instance.pk).update( instrument_model=data["InstrumentProperties"][0]["value"]) RawFile.objects.filter(pk=instance.pk).update( instrument_sn=data["InstrumentProperties"][2]["value"]) RawFile.objects.filter(pk=instance.pk).update( sample_type=data["SampleData"][0]["value"]) to_tz = timezone.get_default_timezone() file_year, file_month, file_date = RawFile.objects.\ filter(pk=instance.pk)[ 0].acquisition_time.astimezone(to_tz).year,\ RawFile.objects.filter(pk=instance.pk)[ 0].acquisition_time.astimezone( to_tz).month, RawFile.objects.filter( pk=instance.pk)[0].acquisition_time.astimezone( to_tz).day if instance.project_name != "": file_dir = os.path.join( settings.MEDIA_ROOT, (f"rawfiles/{file_year}/" f"{file_month}/{instance.project_name}/")) else: file_dir = os.path.join( settings.MEDIA_ROOT, f"rawfiles/{file_year}/{file_month}/{file_date}/") check_folder = os.path.isdir(file_dir) if not check_folder: os.makedirs(file_dir) newfile_name = f"{file_dir}/{rawfile_name_only}" if exists(newfile_name): random_str = "".join(random.choice( string.ascii_lowercase) for i in range(4)) newfile_name = (f"{file_dir}/" f"{rawfile_name_only.split('.')[0]}" f"_{random_str}.raw") shutil.move( (f"/home/rtklab/Documents/" f"data_manager/media/temp/{rawfile_name_only}"), newfile_name) # create the SsdStorage and point current to it if instance.project_name != "": ssd_filelocation = (f"rawfiles/{file_year}/{file_month}/" f"{instance.project_name}/" f"{newfile_name.split('/')[-1]}") else: ssd_filelocation = 
(f"rawfiles/{file_year}/{file_month}" f"/{file_date}/" f"{newfile_name.split('/')[-1]}") ssdform = { "filelocation": ssd_filelocation } ssdob = SsdStorage.objects.create(**ssdform, ) RawFile.objects.filter( pk=instance.pk).update(ssd_storage=ssdob) RawFile.objects.filter(pk=instance.pk).update( rawfile=ssdob.filelocation) # ct = ContentType.objects.get_for_model(SsdStorage) RawFile.objects.filter(pk=instance.pk).update( content_type=ct, object_id=ssdob.pk) ssdfilename = os.path.join( settings.MEDIA_ROOT, ssd_filelocation) filenamelen = len(ssd_filelocation.split("/")[-1]) des_path = (f"media/hdstorage/" f"{ssd_filelocation[:filenamelen*-1]}") isExist = os.path.exists(des_path) if not isExist: # Create a new directory because it does not exist os.makedirs(des_path) shutil.copy(ssdfilename, des_path) hd_filelocation = f"hdstorage/{ssd_filelocation}" hdform = { "filelocation": hd_filelocation } hdob = HdStorage.objects.create(**hdform, ) RawFile.objects.filter(pk=instance.pk).update(hd_storage=hdob) file_size = os.path.getsize( os.path.join(settings.MEDIA_ROOT, RawFile.objects.filter( pk=instance.pk)[ 0].current_raw.filelocation.name))/1024/1024/1024 RawFile.objects.filter(pk=instance.pk).update( file_size=file_size) # auto create QC SpectromineQueue # for some reason app and webiste generate different bool :(, # need to happen before content extraction as # sometimes extraction may fail if (instance.qc_tool != "0" and instance.qc_tool != 0): if (instance.qc_tool == "1" or instance.qc_tool == 1): # 1 is msfragger new_queue = { "creator": instance.creator, } new_queue_obj = MsfraggerQueue.objects.create( **new_queue, ) new_queue_obj.rawfile.add( RawFile.objects.filter(pk=instance.pk).first()) new_queue_obj.save() ct = ContentType.objects.get_for_model(MsfraggerQueue) RawFile.objects.filter( pk=instance.pk).update(qc_content_type=ct) RawFile.objects.filter(pk=instance.pk).update( qc_object_id=new_queue_obj.pk) elif (instance.qc_tool == "3" or instance.qc_tool == 3): # 3 is protein discoverer OTOT new_queue = { "creator": instance.creator, "analysis_name": instance.pk, "processing_method": InMemoryUploadedFile(open( f'media/pd/methods/qc_built_in/OTOT_' f'{instance.sample_obj}.pdProcessingWF', 'r'), None, f'OTOT_{instance.sample_obj}.' f'pdProcessingWF', None, None, None), "consensus_method": InMemoryUploadedFile(open( f'media/pd/methods/qc_built_in/OTOT_' f'{instance.sample_obj}.pdConsensusWF', 'r'), None, f'OTOT_{instance.sample_obj}.' f'pdConsensusWF', None, None, None), } new_queue_obj = PdQueue.objects.create(**new_queue, ) new_queue_obj.rawfile.add( RawFile.objects.filter(pk=instance.pk).first()) new_queue_obj.save() ct = ContentType.objects.get_for_model(PdQueue) RawFile.objects.filter( pk=instance.pk).update(qc_content_type=ct) RawFile.objects.filter(pk=instance.pk).update( qc_object_id=new_queue_obj.pk) elif (instance.qc_tool == "5" or instance.qc_tool == 5): # 5 is protein discoverer ITIT new_queue = { "creator": instance.creator, "analysis_name": instance.pk, "processing_method": InMemoryUploadedFile(open( f'media/pd/methods/qc_built_in/OTIT_' f'{instance.sample_obj}.pdProcessingWF', 'r'), None, f'OTIT_{instance.sample_obj}.' f'pdProcessingWF', None, None, None), "consensus_method": InMemoryUploadedFile(open( f'media/pd/methods/qc_built_in/OTIT_' f'{instance.sample_obj}.pdConsensusWF', 'r'), None, f'OTIT_{instance.sample_obj}.' 
f'pdConsensusWF', None, None, None), } new_queue_obj = PdQueue.objects.create(**new_queue, ) new_queue_obj.rawfile.add( RawFile.objects.filter(pk=instance.pk).first()) new_queue_obj.save() ct = ContentType.objects.get_for_model(PdQueue) RawFile.objects.filter( pk=instance.pk).update(qc_content_type=ct) RawFile.objects.filter(pk=instance.pk).update( qc_object_id=new_queue_obj.pk) # extract the mzML mzml_filename = rawfile_name_only[:-4]+".mzML" # defining an xml string with open(os.path.join("media/temp", mzml_filename), "r") as xml_obj: # coverting the xml data to Python dictionary my_dict = xmltodict.parse(xml_obj.read()) xml_obj.close() os.remove(os.path.join("media/temp", mzml_filename)) ms1_rt = [] ms1_basemz = [] ms1_basemzintensity = [] ms1_ticintensity = [] ms2_rt = [] ms2_injectiontime = [] for i in range(0, len(my_dict["mzML"]["run"] ["spectrumList"]["spectrum"])): if my_dict["mzML"]["run"][ "spectrumList"]["spectrum"][i]["cvParam"][0][ "@value"] == "1": ms1_rt.append(float(my_dict["mzML"]["run"][ "spectrumList"]["spectrum"][i]["scanList"][ "scan"]["cvParam"][0]["@value"])) ms1_basemz.append(round(float( my_dict["mzML"]["run"]["spectrumList"][ "spectrum"][i]["cvParam"][5]["@value"]), 2)) ms1_basemzintensity.append(float( my_dict["mzML"]["run"][ "spectrumList"]["spectrum"][i][ "cvParam"][6]["@value"])) ms1_ticintensity.append(float( my_dict["mzML"]["run"]["spectrumList"][ "spectrum"][i]["cvParam"][3]["@value"])) if my_dict["mzML"]["run"]["spectrumList"][ "spectrum"][i]["cvParam"][0]["@value"] == "2": ms2_rt.append(float(my_dict["mzML"][ "run"]["spectrumList"]["spectrum"][i][ "scanList"]["scan"]["cvParam"][0]["@value"])) ms2_injectiontime.append(float( my_dict["mzML"]["run"]["spectrumList"][ "spectrum"][i]["scanList"]["scan"][ "cvParam"][2]["@value"])) plot_data = {"MS1_RT": ms1_rt, "MS1_Basemz": ms1_basemz, "MS1_Basemzintensity": ms1_basemzintensity, "MS1_Ticintensity": ms1_ticintensity, "MS2_RT": ms2_rt, "MS2_Injectiontime": ms2_injectiontime} newurl = RawFile.objects.filter(pk=instance.pk)[ 0].rawfile.name.replace(".raw", ".pkl") outputfilename = os.path.join(settings.MEDIA_ROOT, newurl) with open(outputfilename, "wb") as handle: pickle.dump(plot_data, handle, protocol=pickle.HIGHEST_PROTOCOL) RawFile.objects.filter(pk=instance.pk).update(pklfile=newurl) pklform = { "filelocation": newurl } pklobj = PklStorage.objects.create(**pklform, ) RawFile.objects.filter( pk=instance.pk).update(pkl_storage=pklobj) RawFile.objects.filter(pk=instance.pk).update( content_extracted=True) except Exception as err: exception_type = type(err).__name__ print(exception_type) instance.note = "Raw file extraction failed" else: instance.note = "Not valid data file type" class UserProfile(models.Model): user = models.OneToOneField(User, related_name='profile', on_delete=models.SET_NULL, blank=True, null=True) hide_otherresult = models.BooleanField(default=False, null=True) def __str__(self): return 'Profile of user: {}'.format(self.user.username) class SpectromineQueue(models.Model): """used to describe spectromine queue.""" rawfile = models.ForeignKey( "RawFile", on_delete=models.SET_NULL, null=True, blank=True, ) run_status = models.BooleanField(default=False, null=True) protein_id = models.IntegerField(blank=True, null=True) peptide_id = models.IntegerField(blank=True, null=True) start_time = models.DateTimeField(blank=True, null=True) finished_time = models.DateTimeField(blank=True, null=True) creator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True) 
result_file = models.FileField( upload_to=(f"hdstorage/spectromine/{date.today().year}/" f"{date.today().month}/{date.today().day}"), null=True, blank=True,) def save(self, *args, **kwargs): super(SpectromineQueue, self).save(*args, **kwargs) if self.protein_id is not None: RawFile.objects.filter(pk=self.rawfile.pk).update( lastqc_protein_id=self.protein_id, lastqc_peptide_id=self.peptide_id, lastqc_tool="Spectromine", lastqc_time=datetime.datetime.now()) class SpectromineWorker(models.Model): """used to describe data process worker.""" worker_name = models.TextField(max_length=100, blank=True, null=True) worker_ip = models.TextField(max_length=100, blank=True, null=True) worker_status = models.TextField(max_length=100, blank=True, null=True) current_job = models.ForeignKey( "SpectromineQueue", on_delete=models.SET_NULL, null=True, blank=True, ) current_percentage = models.IntegerField(blank=True, null=True) last_update = models.DateTimeField(blank=True, null=True) class MaxquantQueue(models.Model): """used to describe Maxquant queue.""" rawfile = models.ManyToManyField(RawFile) analysis_name = models.TextField(max_length=100, blank=True, null=True) setting_xml = models.FileField( upload_to=(f"maxquant_xml/{date.today().year}/{date.today().month}/" f"{date.today().day}"), null=True, blank=True,) evidence_file = models.FileField( upload_to=(f"hdstorage/maxquant/{date.today().year}/" f"{date.today().month}/{date.today().day}"), null=True, blank=True,) protein_file = models.FileField( upload_to=(f"hdstorage/maxquant/{date.today().year}/" f"{date.today().month}/{date.today().day}"), null=True, blank=True,) peptide_file = models.FileField( upload_to=(f"hdstorage/maxquant/{date.today().year}/" f"{date.today().month}/{date.today().day}"), null=True, blank=True,) other_file = models.FileField( upload_to=(f"hdstorage/maxquant/{date.today().year}/" f"{date.today().month}/{date.today().day}"), null=True, blank=True,) run_status = models.BooleanField(default=False, null=True) protein_id = models.IntegerField(blank=True, null=True) peptide_id = models.IntegerField(blank=True, null=True) start_time = models.DateTimeField(blank=True, null=True) finished_time = models.DateTimeField(blank=True, null=True) creator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True) class MaxquantWorker(models.Model): """used to describe maxquant data process worker.""" worker_name = models.TextField(max_length=100, blank=True, null=True) worker_ip = models.TextField(max_length=100, blank=True, null=True) worker_status = models.TextField(max_length=100, blank=True, null=True) current_job = models.ForeignKey( "MaxquantQueue", on_delete=models.SET_NULL, null=True, blank=True, ) current_percentage = models.IntegerField(blank=True, null=True) last_update = models.DateTimeField(blank=True, null=True) class MsfraggerQueue(models.Model): """used to describe Msfragger queue.""" rawfile = models.ManyToManyField(RawFile) analysis_name = models.TextField(max_length=100, blank=True, null=True) ion_file = models.FileField( upload_to=(f"hdstorage/msfragger/{date.today().year}/" f"{date.today().month}/{date.today().day}"), null=True, blank=True,) psm_file = models.FileField( upload_to=(f"hdstorage/msfragger/{date.today().year}/" f"{date.today().month}/{date.today().day}"), null=True, blank=True,) peptide_file = models.FileField( upload_to=(f"hdstorage/msfragger/{date.today().year}/" f"{date.today().month}/{date.today().day}"), null=True, blank=True,) protein_file = models.FileField( 
upload_to=(f"hdstorage/msfragger/{date.today().year}/"
                   f"{date.today().month}/{date.today().day}"),
        null=True, blank=True,)
    run_status = models.BooleanField(default=False, null=True)
    precurosr_id = models.IntegerField(blank=True, null=True)
    psm_id = models.IntegerField(blank=True, null=True)
    peptide_id = models.IntegerField(blank=True, null=True)
    protein_id = models.IntegerField(blank=True, null=True)
    start_time = models.DateTimeField(blank=True, null=True)
    finished_time = models.DateTimeField(blank=True, null=True)
    creator = models.ForeignKey(settings.AUTH_USER_MODEL,
                                on_delete=models.SET_NULL,
                                blank=True, null=True)


class MsfraggerWorker(models.Model):
    """used to describe Msfragger data process worker."""
    worker_name = models.TextField(max_length=100, blank=True, null=True)
    worker_ip = models.TextField(max_length=100, blank=True, null=True)
    worker_status = models.TextField(max_length=100, blank=True, null=True)
    current_job = models.ForeignKey(
        "MsfraggerQueue",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )
    current_percentage = models.IntegerField(blank=True, null=True)
    last_update = models.DateTimeField(blank=True, null=True)


class PdQueue(models.Model):
    """used to describe ProteinDiscoverer queue."""
    rawfile = models.ManyToManyField(RawFile)
    analysis_name
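# --- Illustrative sketch only (not one of the receivers defined in this
# module): the `update_raw` handler above is wired through Django's post_save
# signal, so follow-up processing runs after a RawFile row is saved. A minimal
# receiver of the same shape looks like this; the function name and
# dispatch_uid are hypothetical.
@receiver(post_save, sender=RawFile, dispatch_uid="rawfile_example_receiver")
def _example_rawfile_receiver(sender, instance, created=False, **kwargs):
    # `created` is True only on the first save of the row; `update_raw` above
    # instead keys off `instance.content_extracted`.
    if created and not instance.content_extracted:
        pass  # e.g. queue file conversion / metadata extraction here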
z1]) val.append(annot_img[x2, y1, z2]) val.append(annot_img[x2, y2, z1]) val.append(annot_img[x2, y2, z2]) val = val[1:] labels.append(max(set(val), key = val.count)) # print(labels) labels_name = [] for label in labels: with open(lookup_table, 'r') as f: lines = f.readlines() rows = len(lines) for row in range(rows): line = lines[row][0: 8] b = str(int(label)) if re.match(b, line): # print(lines[row]) a = lines[row][len(b): -16].strip() labels_name.append(a) break return labels_name class ElectrodeSeg: def __init__(self, filePath, patName, iLabel, numMax, diameterSize, spacing, gap): super(ElectrodeSeg, self).__init__() # set up input initials self.filePath = filePath self.patientName = patName raw_flag = 0 # check for the filepath existance for root, dirs, files in os.walk(self.filePath): for filename in files: if re.search(r'CT_intra.nii.gz', filename): raw_flag = 1 self.rawDataPath = f"{self.filePath}/{filename}" break if not raw_flag: sys.exit() label_flag = 0 for root, dirs, files in os.walk(self.filePath): for filename in files: if re.search(r'_labels.npy', filename): label_flag = 1 self.labelsPath = f"{self.filePath}/{filename}" break if not label_flag: sys.exit() self.rawData = nib.load(self.rawDataPath).get_fdata() self.labels = np.load(self.labelsPath) self.iLabel = iLabel self.numMax = numMax self.diameterSize = diameterSize self.spacing = spacing self.gap = gap # some calculations to get the rest initials self.labelValues = np.unique(self.labels) self.numElecs = len(self.labelValues) - 1 if self.numElecs > 8: # remove 'I' from the alphabet list, a trivial custom not to name the electrode 'I' self.alphaList = [chr(i) for i in range(65, 66+self.numElecs)] self.alphaList.pop(8) else: self.alphaList = [chr(i) for i in range(65, 65+self.numElecs)] self.iValue = self.labelValues[self.iLabel] self.nameLabel = self.alphaList[self.iLabel-1] data_elec = np.copy(self.labels) data_elec[np.where(self.labels != self.iValue)] = 0 ## isolate a single cluster of voxels belonging to the ith electrode self.xs, self.ys, self.zs = np.where(data_elec != 0) self.pos_elec = np.transpose(np.vstack((self.xs, self.ys, self.zs))) ## positions of these voxels ### test! data_elec1 = np.copy(self.labels) data_elec1[np.where(self.labels == self.iValue)] = 0 self.xrest, self.yrest, self.zrest = np.where(data_elec1 != 0) self.rawData[self.xrest, self.yrest, self.zrest] = 0 ### test! 
self.rawData_single = self.rawData xmin = np.amin(self.xs) xmax = np.amax(self.xs) ymin = np.amin(self.ys) ymax = np.amax(self.ys) zmin = np.amin(self.zs) zmax = np.amax(self.zs) # self.rawData_single[self.xs, self.ys, self.zs] = self.rawData_single[self.xs, self.ys, self.zs] * 3 self.rawData_single[xmin:xmax+1, ymin:ymax+1, zmin:zmax+1] = self.rawData_single[xmin:xmax+1, ymin:ymax+1, zmin:zmax+1] * 3 self.resultPath = f"{self.filePath}/{self.patientName}_result" if not os.path.exists(self.resultPath): os.mkdir(self.resultPath) self.resultFile = f"{self.resultPath}/{self.nameLabel}.txt" self.elecPos = [0, 0, 0] self.headStart = [0, 0, 0] self.targetPoint = [0, 0, 0] self.regressInfo = [0, 0, 0, 0] def pipeline(self): self.startPoint() self.contactPoint(1) self.regression() for j in np.arange(self.numMax - 1): # if self.rawData[int(round(self.elecPos[-1,0])), int(round(self.elecPos[-1,1])), int(round(self.elecPos[-1,2]))] == 0: # self.elecPos = self.elecPos[0:-1, :] # break if int(self.elecPos[-1,0])==int(self.elecPos[-2,0]) and int(self.elecPos[-1,1])==int(self.elecPos[-2,1]) and int(self.elecPos[-1,2])==int(self.elecPos[-2,2]): self.elecPos = self.elecPos[0:-1, :] break self.step() if self.flag_step_stop: break self.elecPos = self.elecPos[1:, :] # print(self.elecPos) self.resulting() # return self.elecPos def resulting(self): self.elecPos_true = np.copy(self.elecPos) self.elecPos_true[:, 0] = 128 - self.elecPos[:, 0] self.elecPos_true[:, 1] = 128 - self.elecPos[:, 1] self.elecPos_true[:, 2] = self.elecPos[:, 2] - 128 self.elecPos_true = self.elecPos_true[:, [0, 2, 1]] self.elecFilepath = os.path.join(self.filePath, f"{self.patientName}_result") if not os.path.exists(self.elecFilepath): os.mkdir(self.elecFilepath) else: self.elecFile = os.path.join(self.elecFilepath, f"{self.nameLabel}.txt") with open(self.elecFile, "ab") as f: f.seek(0) f.truncate() # f.write(b"\n") np.savetxt(f, self.elecPos_true, fmt='%10.8f', delimiter=' ', newline='\n', header=f"{self.elecPos_true.shape[0]}") ## target point functions def startPoint(self): ## firstly find a voxel near the target x = [np.max(self.xs), np.min(self.xs)] y = [np.max(self.ys), np.min(self.ys)] z = [np.max(self.zs), np.min(self.zs)] self.reg1 = LinearRegression().fit(X=self.xs.reshape(-1,1), y=self.ys) # x-y self.reg2 = LinearRegression().fit(X=self.xs.reshape(-1,1), y=self.zs) # x-z self.reg3 = LinearRegression().fit(X=self.ys.reshape(-1,1), y=self.zs) # y-z coefs = [abs(self.reg1.coef_), abs(self.reg2.coef_), abs(self.reg3.coef_)] coef_min = coefs.index(min(coefs)) if coef_min == 0: index = [0 if self.reg2.coef_>0 else 1, 0 if self.reg3.coef_>0 else 1, 0] elif coef_min == 1: index = [0 if self.reg1.coef_>0 else 1, 0, 0 if self.reg3.coef_>0 else 1] else: index = [0, 0 if self.reg1.coef_>0 else 1, 0 if self.reg2.coef_>0 else 1] indexreverse = [~index[0], ~index[1], ~index[2]] point1 = np.array([x[index[0]], y[index[1]], z[index[2]]]) point2 = np.array([x[indexreverse[0]], y[indexreverse[1]], z[indexreverse[2]]]) center = 127.5 * np.ones(3) diff1 = point1 - center diff2 = point2 - center headStart = point2 if np.sum(np.transpose(diff1)*diff1) > np.sum(np.transpose(diff2)*diff2) else point1 self.direction = indexreverse if np.sum(np.transpose(diff1)*diff1) > np.sum(np.transpose(diff2)*diff2) else index ## secondly specify a target voxel in label voxels diffs = self.pos_elec - headStart diffs2 = np.power(diffs[:,0], 2) + np.power(diffs[:,1], 2) + np.power(diffs[:,2], 2) headPointPos = np.argmin(diffs2) self.headStart = 
self.pos_elec[headPointPos, :] def converge(self, x, y, z): ## converge to the mass center of a cluster of voxels n = self.diameterSize delta = math.ceil(round((n - 1) / 2, 1)) # represent the radius of the electrode contact ## extract a cubic ROI of the raw CT data seq_s = np.arange(x - delta, x + delta + 1) seq_r = np.arange(y - delta, y + delta + 1) seq_c = np.arange(z - delta, z + delta + 1) if not ((np.array(seq_s) > 0).all() and (np.array(seq_r) > 0).all() and (np.array(seq_c) > 0).all()): print('Error: index too small 0!') return 0, 0, 0 elif not ((np.array(seq_s) < 256).all() and (np.array(seq_r) < 256).all() and (np.array(seq_c) < 256).all()): print('Error: index too large 256!') return 0, 0, 0 else: ## extract the ROI cubic # test!!! matrixVoxels = self.rawData_local[seq_s[0]:seq_s[-1]+1, seq_r[0]:seq_r[-1]+1, seq_c[0]:seq_c[-1]+1] sumVoxels = np.sum(matrixVoxels) if (np.sum(matrixVoxels)== 0): print('Error: Converge to non-elec region!') return 0, 0, 0 else: f = np.zeros((1, 4)) for index, element in np.ndenumerate(matrixVoxels): x, y, z = index tmp = np.array([x+seq_s[0], y+seq_r[0], z+seq_c[0], element]) f = np.vstack((f, tmp)) f = f[1:] CM = np.average(f[:,:3], axis=0, weights=f[:,3]) C100 = CM[0] C010 = CM[1] C001 = CM[2] x1 = C100 y1 = C010 z1 = C001 return x1, y1, z1 def contactPoint(self, target): ## converge to an electrode contact position x0 = self.headStart[0] if target == 1 else self.x0 y0 = self.headStart[1] if target == 1 else self.y0 z0 = self.headStart[2] if target == 1 else self.z0 x = int(round(x0)) y = int(round(y0)) z = int(round(z0)) print(f"initial start voxel:({x0}, {y0}, {z0})") # test!!! self.rawData_local = self.rawData_single diff_array = self.pos_elec - np.array([x0, y0, z0]) elec_diffs = np.sqrt(np.dot(diff_array, np.transpose(diff_array)).diagonal()) ind_diffs = np.where(elec_diffs <= 2) self.rawData_local[self.xs[ind_diffs], self.ys[ind_diffs], self.zs[ind_diffs]] = self.rawData_local[self.xs[ind_diffs], self.ys[ind_diffs], self.zs[ind_diffs]] * 2 (x1, y1, z1) = self.converge(x, y, z) itr = 1 flag_convergence = 0 while not ((x==int(round(x1))) and (y==int(round(y1))) and (z==int(round(z1)))): x = int(round(x1)) y = int(round(y1)) z = int(round(z1)) (x1, y1, z1) = self.converge(x, y, z) itr = itr + 1 if itr > 5: flag_convergence = 1 break print(f"Convergent center voxel coordinates:({x1},{y1},{z1})") print(f"Convergent center voxel value:{self.rawData[int(round(x1)), int(round(y1)), int(round(z1))]}") self.flag_step_stop = 0 if (x1, y1, z1) == (0, 0, 0): self.flag_step_stop = 1 print('here1,converged to 0!') # self.elecPos = np.vstack([self.elecPos, [x1, y1, z1]]) else: if not flag_convergence: print('here2,converged normally!') self.targetPoint = [x1, y1, z1] if target == 1 else self.targetPoint self.elecPos = np.vstack([self.elecPos, [x1, y1, z1]]) else: print('here3, maybe not convergent!') self.targetPoint = [x1, y1, z1] if target == 1 else self.targetPoint self.elecPos = np.vstack([self.elecPos, [x1, y1, z1]]) def regression(self): ## regress an electrode and find the axis direction X = np.transpose(np.vstack((self.xs, self.ys))) y = self.zs forcedX = np.transpose(np.array([self.targetPoint[0], self.targetPoint[1]])) forcedy = self.targetPoint[2] ## implant a contraint regression, forcing on the head point X = X - forcedX y = y - forcedy reg = Lasso(fit_intercept=False).fit(X=X, y=y) reg.intercept_ = reg.intercept_ + forcedy - np.dot(forcedX, reg.coef_) ## regression between x and y reg2 = 
LinearRegression(fit_intercept=True).fit(X=self.xs.reshape(-1,1), y=self.ys) self.coef = reg.coef_ self.intercept = reg.intercept_ self.coef2 = reg2.coef_ self.intercept2 = reg2.intercept_ def step(self): ## step out along the electrode axis dis = self.spacing # initial step size # delta_x = np.sqrt(np.power(dis, 2) / (1 + np.power(self.coef2[0],2) + np.power(np.dot(self.coef, np.array([1, self.coef2[0]])) ,2))) # delta_y = np.dot(self.coef2[0], delta_x) # delta_z = np.dot(self.coef, np.array([1, self.coef2[0]])) * delta_x diff_x = np.max(self.xs) - np.min(self.xs) diff_y = np.max(self.ys) - np.min(self.ys) diff_z = np.max(self.zs) - np.min(self.zs) a = np.power(diff_x,2) + np.power(diff_y,2) + np.power(diff_z,2) delta_x = diff_x * np.sqrt(np.power(dis,2) / a) delta_y = diff_y * np.sqrt(np.power(dis,2) / a) delta_z = diff_z * np.sqrt(np.power(dis,2) / a) # delta_x = self.reg2.coef_ * np.sqrt(np.power(dis,2) / (1 + np.power(self.reg2.coef_,2) + np.power(self.reg3.coef_,2))) # delta_y = self.reg3.coef_ * np.sqrt(np.power(dis,2) / (1 + np.power(self.reg2.coef_,2) + np.power(self.reg3.coef_,2))) # delta_z = np.sqrt(np.power(dis,2) / (1 + np.power(self.reg2.coef_,2) + np.power(self.reg3.coef_,2)))
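# Note on ElectrodeSeg.step() above: the per-axis deltas are the bounding-box extents of
# the electrode's voxels, all multiplied by one common factor chosen so that the combined
# step has Euclidean length equal to the contact spacing. A minimal standalone sketch of
# that normalisation follows; the helper name and the test coordinates are illustrative,
# not taken from the original code.
import numpy as np

def unit_step(xs, ys, zs, spacing):
    """Step vector along the voxel cloud's extent, rescaled to length `spacing`."""
    diff = np.array([np.ptp(xs), np.ptp(ys), np.ptp(zs)], dtype=float)  # dx, dy, dz
    scale = np.sqrt(spacing ** 2 / np.dot(diff, diff))  # sqrt(dis^2 / (dx^2 + dy^2 + dz^2))
    return diff * scale

# Quick check: the step length equals the requested contact spacing.
step = unit_step(np.array([10, 40]), np.array([5, 20]), np.array([0, 8]), spacing=3.5)
assert np.isclose(np.linalg.norm(step), 3.5)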
) class DATAFILEPARAMETER(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "DATAFILEPARAMETER" __singularfieldname__ = "datafileParameter" __pluralfieldname__ = "datafileParameters" __table_args__ = ( Index("UNQ_DATAFILEPARAMETER_0", "DATAFILE_ID", "PARAMETER_TYPE_ID"), ) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) dateTimeValue = Column("DATETIME_VALUE", DateTime) error = Column("ERROR", Float(asdecimal=True)) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) numericValue = Column("NUMERIC_VALUE", Float(asdecimal=True)) rangeBottom = Column("RANGEBOTTOM", Float(asdecimal=True)) rangeTop = Column("RANGETOP", Float(asdecimal=True)) stringValue = Column("STRING_VALUE", String(4000)) datafileID = Column("DATAFILE_ID", ForeignKey("DATAFILE.ID"), nullable=False) parameterTypeID = Column( "PARAMETER_TYPE_ID", ForeignKey("PARAMETERTYPE.ID"), nullable=False, index=True, ) DATAFILE = relationship( "DATAFILE", primaryjoin="DATAFILEPARAMETER.datafileID == DATAFILE.id", backref="datafileParameters", ) PARAMETERTYPE = relationship( "PARAMETERTYPE", primaryjoin="DATAFILEPARAMETER.parameterTypeID == PARAMETERTYPE.id", backref="datafileParameters", ) class DATASET(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "DATASET" __singularfieldname__ = "dataset" __pluralfieldname__ = "datasets" __table_args__ = (Index("UNQ_DATASET_0", "INVESTIGATION_ID", "NAME"),) id = Column("ID", BigInteger, primary_key=True) complete = Column( "COMPLETE", Boolean, nullable=False, server_default=FetchedValue(), ) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) description = Column("DESCRIPTION", String(255)) doi = Column("DOI", String(255)) endDate = Column("END_DATE", DateTime) location = Column("LOCATION", String(255)) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) name = Column("NAME", String(255), nullable=False) startDate = Column("STARTDATE", DateTime) investigationID = Column( "INVESTIGATION_ID", ForeignKey("INVESTIGATION.ID"), nullable=False, ) sampleID = Column("SAMPLE_ID", ForeignKey("SAMPLE.ID"), index=True) typeID = Column("TYPE_ID", ForeignKey("DATASETTYPE.ID"), nullable=False, index=True) INVESTIGATION = relationship( "INVESTIGATION", primaryjoin="DATASET.investigationID == INVESTIGATION.id", backref="datasets", ) SAMPLE = relationship( "SAMPLE", primaryjoin="DATASET.sampleID == SAMPLE.id", backref="datasets", ) DATASETTYPE = relationship( "DATASETTYPE", primaryjoin="DATASET.typeID == DATASETTYPE.id", backref="datasets", ) class DATASETPARAMETER(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "DATASETPARAMETER" __singularfieldname__ = "datasetParameter" __pluralfieldname__ = "datasetParameters" __table_args__ = ( Index("UNQ_DATASETPARAMETER_0", "DATASET_ID", "PARAMETER_TYPE_ID"), ) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) dateTimeValue = Column("DATETIME_VALUE", DateTime) error = Column("ERROR", Float(asdecimal=True)) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) numericValue = Column("NUMERIC_VALUE", Float(asdecimal=True)) rangeBottom = Column("RANGEBOTTOM", Float(asdecimal=True)) 
rangeTop = Column("RANGETOP", Float(asdecimal=True)) stringValue = Column("STRING_VALUE", String(4000)) datasetID = Column("DATASET_ID", ForeignKey("DATASET.ID"), nullable=False) parameterTypeID = Column( "PARAMETER_TYPE_ID", ForeignKey("PARAMETERTYPE.ID"), nullable=False, index=True, ) DATASET = relationship( "DATASET", primaryjoin="DATASETPARAMETER.datasetID == DATASET.id", backref="datasetParameters", ) PARAMETERTYPE = relationship( "PARAMETERTYPE", primaryjoin="DATASETPARAMETER.parameterTypeID == PARAMETERTYPE.id", backref="datasetParameters", ) class DATASETTYPE(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "DATASETTYPE" __singularfieldname__ = "type" __pluralfieldname__ = "datasetTypes" __table_args__ = (Index("UNQ_DATASETTYPE_0", "FACILITY_ID", "NAME"),) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) description = Column("DESCRIPTION", String(255)) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) name = Column("NAME", String(255), nullable=False) facilityID = Column("FACILITY_ID", ForeignKey("FACILITY.ID"), nullable=False) FACILITY = relationship( "FACILITY", primaryjoin="DATASETTYPE.facilityID == FACILITY.id", backref="datasetTypes", ) class FACILITYCYCLE(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "FACILITYCYCLE" __singularfieldname__ = "facilityCycle" __pluralfieldname__ = "facilityCycles" __table_args__ = (Index("UNQ_FACILITYCYCLE_0", "FACILITY_ID", "NAME"),) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) description = Column("DESCRIPTION", String(255)) endDate = Column("ENDDATE", DateTime) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) name = Column("NAME", String(255), nullable=False) startDate = Column("STARTDATE", DateTime) facilityID = Column("FACILITY_ID", ForeignKey("FACILITY.ID"), nullable=False) FACILITY = relationship( "FACILITY", primaryjoin="FACILITYCYCLE.facilityID == FACILITY.id", backref="facilityCycles", ) class GROUPING(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "GROUPING" __singularfieldname__ = "grouping" __pluralfieldname__ = "groupings" id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) name = Column("NAME", String(255), nullable=False, unique=True) class INSTRUMENT(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "INSTRUMENT" __singularfieldname__ = "instrument" __pluralfieldname__ = "instruments" __table_args__ = (Index("UNQ_INSTRUMENT_0", "FACILITY_ID", "NAME"),) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) description = Column("DESCRIPTION", String(4000)) fullName = Column("FULLNAME", String(255)) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) name = Column("NAME", String(255), nullable=False) type = Column("TYPE", String(255)) url = Column("URL", String(255)) facilityID = Column("FACILITY_ID", ForeignKey("FACILITY.ID"), nullable=False) 
FACILITY = relationship( "FACILITY", primaryjoin="INSTRUMENT.facilityID == FACILITY.id", backref="instruments", ) class INSTRUMENTSCIENTIST(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "INSTRUMENTSCIENTIST" __singularfieldname__ = "instrumentScientist" __pluralfieldname__ = "instrumentScientists" __table_args__ = (Index("UNQ_INSTRUMENTSCIENTIST_0", "USER_ID", "INSTRUMENT_ID"),) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) instrumentID = Column( "INSTRUMENT_ID", ForeignKey("INSTRUMENT.ID"), nullable=False, index=True, ) userID = Column("USER_ID", ForeignKey("USER_.ID"), nullable=False) INSTRUMENT = relationship( "INSTRUMENT", primaryjoin="INSTRUMENTSCIENTIST.instrumentID == INSTRUMENT.id", backref="instrumentScientists", ) USER = relationship( "USER", primaryjoin="INSTRUMENTSCIENTIST.userID == USER.id", backref="instrumentScientists", ) class INVESTIGATION(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "INVESTIGATION" __singularfieldname__ = "investigation" __pluralfieldname__ = "investigations" __table_args__ = (Index("UNQ_INVESTIGATION_0", "FACILITY_ID", "NAME", "VISIT_ID"),) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) doi = Column("DOI", String(255)) endDate = Column("ENDDATE", DateTime) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) name = Column("NAME", String(255), nullable=False) releaseDate = Column("RELEASEDATE", DateTime) startDate = Column("STARTDATE", DateTime) summary = Column("SUMMARY", String(4000)) title = Column("TITLE", String(255), nullable=False) visitId = Column("VISIT_ID", String(255), nullable=False) facilityID = Column("FACILITY_ID", ForeignKey("FACILITY.ID"), nullable=False) typeID = Column( "TYPE_ID", ForeignKey("INVESTIGATIONTYPE.ID"), nullable=False, index=True, ) FACILITY = relationship( "FACILITY", primaryjoin="INVESTIGATION.facilityID == FACILITY.id", backref="investigations", ) INVESTIGATIONTYPE = relationship( "INVESTIGATIONTYPE", primaryjoin="INVESTIGATION.typeID == INVESTIGATIONTYPE.id", backref="investigations", ) class INVESTIGATIONGROUP(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "INVESTIGATIONGROUP" __singularfieldname__ = "investigationGroup" __pluralfieldname__ = "investigationGroups" __table_args__ = ( Index("UNQ_INVESTIGATIONGROUP_0", "GROUP_ID", "INVESTIGATION_ID", "ROLE"), ) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) role = Column("ROLE", String(255), nullable=False) groupID = Column("GROUP_ID", ForeignKey("GROUPING.ID"), nullable=False) investigationID = Column( "INVESTIGATION_ID", ForeignKey("INVESTIGATION.ID"), nullable=False, index=True, ) GROUPING = relationship( "GROUPING", primaryjoin="INVESTIGATIONGROUP.groupID == GROUPING.id", backref="investigationGroups", ) INVESTIGATION = relationship( "INVESTIGATION", primaryjoin="INVESTIGATIONGROUP.investigationID == INVESTIGATION.id", backref="investigationGroups", ) class INVESTIGATIONINSTRUMENT(Base, EntityHelper, 
metaclass=EntityMeta): __tablename__ = "INVESTIGATIONINSTRUMENT" __singularfieldname__ = "investigationInstrument" __pluralfieldname__ = "investigationInstruments" __table_args__ = ( Index("UNQ_INVESTIGATIONINSTRUMENT_0", "INVESTIGATION_ID", "INSTRUMENT_ID"), ) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) instrumentID = Column( "INSTRUMENT_ID", ForeignKey("INSTRUMENT.ID"), nullable=False, index=True, ) investigationID = Column( "INVESTIGATION_ID", ForeignKey("INVESTIGATION.ID"), nullable=False, ) INSTRUMENT = relationship( "INSTRUMENT", primaryjoin="INVESTIGATIONINSTRUMENT.instrumentID == INSTRUMENT.id", backref="investigationInstruments", ) INVESTIGATION = relationship( "INVESTIGATION", primaryjoin="INVESTIGATIONINSTRUMENT.investigationID == INVESTIGATION.id", backref="investigationInstruments", ) class INVESTIGATIONPARAMETER(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "INVESTIGATIONPARAMETER" __singularfieldname__ = "investigationParameter" __pluralfieldname__ = "investigationParameters" __table_args__ = ( Index("UNQ_INVESTIGATIONPARAMETER_0", "INVESTIGATION_ID", "PARAMETER_TYPE_ID"), ) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) dateTimeValue = Column("DATETIME_VALUE", DateTime) error = Column("ERROR", Float(asdecimal=True)) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) numericValue = Column("NUMERIC_VALUE", Float(asdecimal=True)) rangeBottom = Column("RANGEBOTTOM", Float(asdecimal=True)) rangeTop = Column("RANGETOP", Float(asdecimal=True)) stringValue = Column("STRING_VALUE", String(4000)) investigationID = Column( "INVESTIGATION_ID", ForeignKey("INVESTIGATION.ID"), nullable=False, ) parameterTypeID = Column( "PARAMETER_TYPE_ID", ForeignKey("PARAMETERTYPE.ID"), nullable=False, index=True, ) INVESTIGATION = relationship( "INVESTIGATION", primaryjoin="INVESTIGATIONPARAMETER.investigationID == INVESTIGATION.id", backref="investigationParameters", ) PARAMETERTYPE = relationship( "PARAMETERTYPE", primaryjoin="INVESTIGATIONPARAMETER.parameterTypeID == PARAMETERTYPE.id", backref="investigationParameters", ) class INVESTIGATIONTYPE(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "INVESTIGATIONTYPE" __singularfieldname__ = "type" __pluralfieldname__ = "investigationTypes" __table_args__ = (Index("UNQ_INVESTIGATIONTYPE_0", "NAME", "FACILITY_ID"),) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) description = Column("DESCRIPTION", String(255)) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) name = Column("NAME", String(255), nullable=False) facilityID = Column( "FACILITY_ID", ForeignKey("FACILITY.ID"), nullable=False, index=True, ) FACILITY = relationship( "FACILITY", primaryjoin="INVESTIGATIONTYPE.facilityID == FACILITY.id", backref="investigationTypes", ) class INVESTIGATIONUSER(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "INVESTIGATIONUSER" __singularfieldname__ = "investigationUser" __pluralfieldname__ = "investigationUsers" __table_args__ = ( 
Index("UNQ_INVESTIGATIONUSER_0", "USER_ID", "INVESTIGATION_ID", "ROLE"), ) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) role = Column("ROLE", String(255), nullable=False) investigationID = Column( "INVESTIGATION_ID", ForeignKey("INVESTIGATION.ID"), nullable=False, index=True, ) userID = Column("USER_ID", ForeignKey("USER_.ID"), nullable=False) INVESTIGATION = relationship( "INVESTIGATION", primaryjoin="INVESTIGATIONUSER.investigationID == INVESTIGATION.id", backref="investigationUsers", ) USER = relationship( "USER", primaryjoin="INVESTIGATIONUSER.userID == USER.id", backref="investigationUsers", ) class JOB(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "JOB" __singularfieldname__ = "job" __pluralfieldname__ = "jobs" id = Column("ID", BigInteger, primary_key=True) arguments = Column("ARGUMENTS", String(255)) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False) modId = Column("MOD_ID", String(255), nullable=False) modTime = Column("MOD_TIME", DateTime, nullable=False) applicationID = Column( "APPLICATION_ID", ForeignKey("APPLICATION.ID"), nullable=False, index=True, ) inputDataCollectionID = Column( "INPUTDATACOLLECTION_ID", ForeignKey("DATACOLLECTION.ID"), index=True, ) outputDataCollectionID = Column( "OUTPUTDATACOLLECTION_ID", ForeignKey("DATACOLLECTION.ID"), index=True, ) APPLICATION = relationship( "APPLICATION", primaryjoin="JOB.applicationID == APPLICATION.id", backref="jobs", ) DATACOLLECTION = relationship( "DATACOLLECTION", primaryjoin="JOB.inputDataCollectionID == DATACOLLECTION.id", backref="jobs", ) class KEYWORD(Base, EntityHelper, metaclass=EntityMeta): __tablename__ = "KEYWORD" __singularfieldname__ = "keyword" __pluralfieldname__ = "keywords" __table_args__ = (Index("UNQ_KEYWORD_0", "NAME", "INVESTIGATION_ID"),) id = Column("ID", BigInteger, primary_key=True) createId = Column("CREATE_ID", String(255), nullable=False) createTime = Column("CREATE_TIME", DateTime, nullable=False)
before function. "trigger": "enable_auto_rollbacks_button_clicked", "unless": [self.auto_rollbacks_enabled], "before": self.enable_auto_rollbacks, } yield { "source": "*", "dest": None, # Don't actually change state, just call the before function. "trigger": "disable_auto_rollbacks_button_clicked", "conditions": [self.any_slo_failing, self.auto_rollbacks_enabled], "before": self.disable_auto_rollbacks, } yield { "source": "*", "dest": None, "trigger": "slos_started_failing", "conditions": [self.auto_rollbacks_enabled], "unless": [self.already_rolling_back], "before": self.start_auto_rollback_countdown, } yield { "source": "*", "dest": None, "trigger": "slos_stopped_failing", "before": self.cancel_auto_rollback_countdown, } yield { "source": "*", "dest": None, "trigger": "snooze_button_clicked", "before": self.restart_timer, "conditions": [self.is_timer_running], } def disable_auto_rollbacks(self) -> None: self.cancel_auto_rollback_countdown() self.auto_rollback = False self.update_slack_status( f"Automatic rollback disabled for this deploy. To disable this permanently for this step, edit `deploy.yaml` and set `auto_rollback: false` for the `{self.deploy_group}` step." ) def enable_auto_rollbacks(self) -> None: self.auto_rollback = True self.auto_rollbacks_ever_enabled = True self.update_slack_status( f"Automatic rollback enabled for this deploy. Will watch for failures and rollback when necessary. To set this permanently, edit `deploy.yaml` and set `auto_rollback: false` for the `{self.deploy_group}` step." ) def auto_rollbacks_enabled(self) -> bool: """This getter exists so it can be a condition on transitions, since those need to be callables.""" return self.auto_rollback def get_auto_rollback_delay(self) -> float: return self.auto_rollback_delay def get_auto_certify_delay(self) -> float: if self.auto_certify_delay is not None: return self.auto_certify_delay else: if self.auto_rollbacks_ever_enabled: return DEFAULT_AUTO_CERTIFY_DELAY else: return 0 def already_rolling_back(self) -> bool: return self.state in self.rollback_states def status_code_by_state(self) -> Mapping[str, int]: codes = { "deploy_errored": 2, "deploy_cancelled": 1, "mfd_failed": self.mark_for_deployment_return_code, "abandon": 1, "complete": 0, } if not self.block: # If we don't pass --wait-for-deployment, then exit immediately after mark-for-deployment succeeds. codes["deploying"] = 0 if self.get_auto_certify_delay() <= 0: # Instead of setting a 0-second timer to move to certify, just exit 0 when the deploy finishes. codes["deployed"] = 0 return codes def get_active_button(self) -> Optional[str]: return { "start_deploy": "forward", "deploying": "forward", "deployed": None, "start_rollback": "rollback", "rolling_back": "rollback", "rolled_back": None, }.get(self.state) def on_enter_mfd_failed(self) -> None: self.update_slack_status( f"Marking `{self.commit[:8]}` for deployment for {self.deploy_group} failed. Please see Jenkins for more output." ) # noqa E501 def on_enter_deploying(self) -> None: # if self.block is False, then deploying is a terminal state so we will promptly exit. # Don't bother starting the background thread in this case. 
if self.block: thread = Thread( target=self.do_wait_for_deployment, args=(self.commit,), daemon=True ) thread.start() self.cancel_paasta_status_reminder() self.schedule_paasta_status_reminder() def on_exit_deploying(self) -> None: self.stop_waiting_for_deployment(self.commit) self.cancel_paasta_status_reminder() def on_enter_start_rollback(self) -> None: self.update_slack_status( f"Rolling back ({self.deploy_group}) to {self.old_git_sha}" ) self.mark_for_deployment_return_code = mark_for_deployment( git_url=self.git_url, deploy_group=self.deploy_group, service=self.service, commit=self.old_git_sha, ) if self.mark_for_deployment_return_code != 0: self.trigger("mfd_failed") else: self.update_slack_thread( f"Marked `{self.old_git_sha[:8]}` for {self.deploy_group}." + ( "\n" + self.get_authors() if self.deploy_group_is_set_to_notify("notify_after_mark") else "" ) ) self.trigger("mfd_succeeded") def on_enter_rolling_back(self) -> None: if self.block: thread = Thread( target=self.do_wait_for_deployment, args=(self.old_git_sha,), daemon=True, ) thread.start() def on_exit_rolling_back(self) -> None: self.stop_waiting_for_deployment(self.old_git_sha) def on_enter_deploy_errored(self) -> None: report_waiting_aborted(self.service, self.deploy_group) self.update_slack_status(f"Deploy aborted, but it will still try to converge.") self.send_manual_rollback_instructions() if self.deploy_group_is_set_to_notify("notify_after_abort"): self.ping_authors("Deploy errored") def on_enter_deploy_cancelled(self) -> None: if self.deploy_group_is_set_to_notify("notify_after_abort"): self.ping_authors("Deploy cancelled") def stop_waiting_for_deployment(self, target_commit: str) -> None: try: self.wait_for_deployment_tasks[target_commit].cancel() del self.wait_for_deployment_tasks[target_commit] except (KeyError, asyncio.InvalidStateError): pass @a_sync.to_blocking async def do_wait_for_deployment(self, target_commit: str) -> None: try: self.stop_waiting_for_deployment(target_commit) wait_for_deployment_task = asyncio.create_task( wait_for_deployment( service=self.service, deploy_group=self.deploy_group, git_sha=target_commit, soa_dir=self.soa_dir, timeout=self.timeout, progress=self.progress, polling_interval=self.polling_interval, diagnosis_interval=self.diagnosis_interval, time_before_first_diagnosis=self.time_before_first_diagnosis, notify_fn=self.ping_authors, ) ) self.wait_for_deployment_tasks[target_commit] = wait_for_deployment_task await wait_for_deployment_task if self.deploy_group_is_set_to_notify("notify_after_wait"): self.ping_authors(f"Finished waiting for deployment of {target_commit}") else: self.update_slack_thread( f"Finished waiting for deployment of {target_commit}" ) self.trigger("deploy_finished") except (KeyboardInterrupt, TimeoutError): self.trigger("deploy_cancelled") except NoSuchCluster: self.trigger("deploy_errored") except asyncio.CancelledError: # Don't trigger deploy_errored when someone calls stop_waiting_for_deployment. 
pass except Exception: log.error("Caught exception in wait_for_deployment:") log.error(traceback.format_exc()) self.trigger("deploy_errored") def on_enter_rolled_back(self) -> None: self.update_slack_status( f"Finished rolling back to `{self.old_git_sha[:8]}` in {self.deploy_group}" ) line = f"Rollback to {self.old_git_sha[:8]} for {self.deploy_group} complete" _log(service=self.service, component="deploy", line=line, level="event") self.start_timer(self.auto_abandon_delay, "auto_abandon", "abandon") def on_enter_deployed(self) -> None: self.update_slack_status( f"Finished deployment of `{self.commit[:8]}` to {self.deploy_group}" ) line = f"Deployment of {self.commit[:8]} for {self.deploy_group} complete" _log(service=self.service, component="deploy", line=line, level="event") self.send_manual_rollback_instructions() if self.any_slo_failing() and self.auto_rollbacks_enabled(): self.ping_authors( "Because an SLO is currently failing, we will not automatically certify. Instead, we will wait indefinitely until you click one of the buttons above." ) else: if self.get_auto_certify_delay() > 0: self.start_timer( self.get_auto_certify_delay(), "auto_certify", "certify" ) if self.deploy_group_is_set_to_notify("notify_after_good_deploy"): self.ping_authors() def on_enter_complete(self) -> None: if self.deploy_group_is_set_to_notify("notify_after_good_deploy"): self.ping_authors() def send_manual_rollback_instructions(self) -> None: if self.old_git_sha != self.commit: message = ( "If you need to roll back manually, run: " f"`paasta rollback --service {self.service} --deploy-group {self.deploy_group} " f"--commit {self.old_git_sha}`" ) self.update_slack_thread(message) print(message) def after_state_change(self) -> None: self.update_slack() super().after_state_change() def get_signalfx_api_token(self) -> str: return ( load_system_paasta_config() .get_monitoring_config() .get("signalfx_api_key", None) ) def get_button_text(self, button: str, is_active: bool) -> str: active_button_texts = { "forward": f"Rolling Forward to {self.commit[:8]} :zombocom:" } inactive_button_texts = { "forward": f"Continue Forward to {self.commit[:8]} :arrow_forward:", "complete": f"Complete deploy to {self.commit[:8]} :white_check_mark:", "snooze": f"Reset countdown", "enable_auto_rollbacks": "Enable auto rollbacks :eyes:", "disable_auto_rollbacks": "Disable auto rollbacks :close_eyes_monkey:", } if self.old_git_sha is not None: active_button_texts.update( {"rollback": f"Rolling Back to {self.old_git_sha[:8]} :zombocom:"} ) inactive_button_texts.update( { "rollback": f"Roll Back to {self.old_git_sha[:8]} :arrow_backward:", "abandon": f"Abandon deploy, staying on {self.old_git_sha[:8]} :x:", } ) return (active_button_texts if is_active else inactive_button_texts)[button] def start_auto_rollback_countdown(self, extra_text: str = "") -> None: cancel_button_text = self.get_button_text( "disable_auto_rollbacks", is_active=False ) super().start_auto_rollback_countdown( extra_text=f'Click "{cancel_button_text}" to cancel this!' 
) if self.deploy_group_is_set_to_notify("notify_after_auto_rollback"): self.ping_authors() def deploy_group_is_set_to_notify(self, notify_type: str) -> bool: return deploy_group_is_set_to_notify( self.deploy_info, self.deploy_group, notify_type ) def __build_rollback_audit_details( self, rollback_type: RollbackTypes ) -> Dict[str, str]: return { "rolled_back_from": self.commit, "rolled_back_to": self.old_git_sha, "rollback_type": rollback_type.value, "deploy_group": self.deploy_group, } def log_slo_rollback(self) -> None: _log_audit( action="rollback", action_details=self.__build_rollback_audit_details( RollbackTypes.AUTOMATIC_SLO_ROLLBACK ), service=self.service, ) def log_user_rollback(self) -> None: _log_audit( action="rollback", action_details=self.__build_rollback_audit_details( RollbackTypes.USER_INITIATED_ROLLBACK ), service=self.service, ) async def wait_until_instance_is_done( executor: concurrent.futures.Executor, service: str, instance: str, cluster: str, git_sha: str, instance_config: LongRunningServiceConfig, polling_interval: float, diagnosis_interval: float, time_before_first_diagnosis: float, should_ping_for_unhealthy_pods: bool, notify_fn: Optional[Callable[[str], None]] = None, ) -> Tuple[str, str]: loop = asyncio.get_running_loop() diagnosis_task = asyncio.create_task( periodically_diagnose_instance( executor, service, instance, cluster, git_sha, instance_config, diagnosis_interval, time_before_first_diagnosis, should_ping_for_unhealthy_pods, notify_fn, ) ) try: while not await loop.run_in_executor( executor, functools.partial( check_if_instance_is_done, service, instance, cluster, git_sha, instance_config, ), ): await asyncio.sleep(polling_interval) return ( cluster, instance, ) # for the convenience of the caller, to know which future is finishing. finally: diagnosis_task.cancel() async def periodically_diagnose_instance( executor: concurrent.futures.Executor, service: str, instance: str, cluster: str, git_sha: str, instance_config: LongRunningServiceConfig, diagnosis_interval: float, time_before_first_diagnosis: float, should_ping_for_unhealthy_pods: bool, notify_fn: Optional[Callable[[str], None]] = None, ) -> None: await asyncio.sleep(time_before_first_diagnosis) loop = asyncio.get_running_loop() while True: try: await loop.run_in_executor( executor, functools.partial( diagnose_why_instance_is_stuck, service, instance, cluster, git_sha, instance_config, should_ping_for_unhealthy_pods, notify_fn, ), ) except asyncio.CancelledError: raise except Exception: print(f"Couldn't get status of {service}.{instance}:") traceback.print_exc() await asyncio.sleep(diagnosis_interval) def diagnose_why_instance_is_stuck( service: str, instance: str, cluster: str, git_sha: str, instance_config: LongRunningServiceConfig, should_ping_for_unhealthy_pods: bool, notify_fn: Optional[Callable[[str], None]] = None, ) -> None: api = client.get_paasta_oapi_client(cluster=cluster) try: status = api.service.status_instance( service=service, instance=instance, include_smartstack=False, include_envoy=False, include_mesos=False, new=True, ) except api.api_error as e: log.warning( "Error getting service status from PaaSTA API for " f"{cluster}: {e.status} {e.reason}" ) return print(f" Status for {service}.{instance} in {cluster}:") for version in status.kubernetes_v2.versions: # We call get_version_table_entry directly so that we can set version_name_suffix based on git_sha instead of # creation time of the version (which is what get_versions_table does.) 
# Without this, we'd call the old version "new" until the new version is actually created, which would be confusing. for line in get_version_table_entry( version, service, instance, cluster, version_name_suffix="new" if version.git_sha == git_sha else "old", show_config_sha=True, verbose=0, ): print(f" {line}") print("") if should_ping_for_unhealthy_pods and notify_fn: maybe_ping_for_unhealthy_pods( service, instance, cluster, git_sha, status, notify_fn ) already_pinged = False def maybe_ping_for_unhealthy_pods( service: str, instance: str, cluster: str, git_sha: str, status: InstanceStatusKubernetesV2, notify_fn: Callable[[str], None], ) -> None: global already_pinged if not already_pinged: # there can be multiple current versions, e.g. if someone changes yelpsoa-configs during a bounce. current_versions = [ v for v in status.kubernetes_v2.versions if v.git_sha == git_sha ] pingable_pods = [ pod for version in current_versions for pod in version.pods if should_ping_for_pod(pod) ] if pingable_pods: already_pinged = True ping_for_pods(service, instance, cluster, pingable_pods, notify_fn) def should_ping_for_pod(pod: KubernetesPodV2) -> bool: return recent_container_restart(get_main_container(pod)) def ping_for_pods( service: str, instance: str, cluster: str, pods: List[KubernetesPodV2], notify_fn: Callable[[str], None], ) -> None: pods_by_reason: Dict[str, List[KubernetesPodV2]] = {} for pod in pods: pods_by_reason.setdefault(get_main_container(pod).reason, []).append(pod) for reason, pods_with_reason in pods_by_reason.items(): explanation = { "Error": "crashed on startup", "OOMKilled": "run out of memory", "CrashLoopBackOff": "crashed on startup several times, and Kubernetes is backing off restarting them", }.get(reason, f"restarted ({reason})") status_tip = f"Take a
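# The deploy state machine earlier in this excerpt declares its transitions as plain dicts
# (trigger/source/dest/conditions/unless/before), the same keyword shape accepted by the
# `transitions` library. A standalone sketch of how such a dict drives a Machine, assuming
# that library; the class, states and callbacks here are illustrative, not paasta's own wiring.
from transitions import Machine

class Deploy:
    def auto_rollbacks_enabled(self) -> bool:
        return True

    def start_auto_rollback_countdown(self) -> None:
        print("starting rollback countdown")

transition_table = [
    {
        # dest=None keeps the current state and only runs the callbacks (an "internal"
        # transition), which is how the slos_started_failing transition above is declared.
        "trigger": "slos_started_failing",
        "source": "*",
        "dest": None,
        "conditions": ["auto_rollbacks_enabled"],
        "before": "start_auto_rollback_countdown",
    }
]

deploy = Deploy()
Machine(model=deploy, states=["deploying", "deployed"], transitions=transition_table, initial="deploying")
deploy.slos_started_failing()  # prints the countdown message; deploy.state stays "deploying"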
= None # type: Q child_connector = QConn.C_AND # Type: QConn def __init__(self, field: str, operator: QOper=QOper.O_EQUAL, value=None, *args): self._field = field self._field_operator = operator self._value = value if operator == QOper.O_BETWEEN: if len(args) == 1: self._between_value = args[0] else: raise ValueError('Second value in between operation missing') elif operator == QOper.O_IN: values = list() values.append(value) if args: for v in args: values.append(v) self._value = values def add(self, q_object, conn: QConn=QConn.C_AND): self.child = q_object self.child_connector = conn def negate(self): self.invert = not self.invert def _combine(self, other, conn: QConn=QConn.C_AND): if not isinstance(other, Q): raise TypeError(other) obj = self.__class__(self._field, self._field_operator, self._value) obj.invert = self.invert obj._between_value = self._between_value if self.child: obj.child = self.child obj.child_connector = self.child_connector obj.child.add(other, conn) else: obj.add(other, conn) return obj def __or__(self, other): return self._combine(other, QConn.C_OR) def __and__(self, other): return self._combine(other, QConn.C_AND) def __invert__(self): self.negate() return self def __str__(self): """ Return the formated where clause :return: where clause string """ clause = '' invert = '' placeholder = self.placeholder if isinstance(self._value, datetime.date): placeholder = "'{0}'".format(placeholder) if self.invert: invert = 'NOT ' if self._field_operator == QOper.O_IS_NULL: clause += ' {0} IS {1}NULL'.format(self._field, invert) elif self._field_operator == QOper.O_BETWEEN: clause += ' {0}{1} {2} {3} AND {3}'.format( invert, self._field, self._field_operator.value, placeholder) elif self._field_operator == QOper.O_IN: plhs = ', '.join(placeholder for x in self._value) clause += ' {0}{1} {2} ({3})'.format(invert, self._field, self._field_operator.value, plhs) else: clause += ' {0}{1} {2} {3}'.format(invert, self._field, self._field_operator.value, placeholder) if self.child: clause += ' {0}{1}'.format(self.child_connector.value, str(self.child)) return clause def get_args(self, args=None): """ Return the arguments for the where clause in a list :return: list of arguments """ if not args: args = list() if isinstance(self._value, list): for v in self._value: args.append(v) else: args.append(self._value) if self._between_value is not None: args.append(self._between_value) if self.child: args = self.child.get_args(args) return args class BaseQuery(object): """ This is the object that holds the options for each SQL statement part, then generates the SQL and return the results """ # None or list object with fields used in query or _fields = None # type: list _order_by = None # type: list _group_by = None # type: list _where = None # type: Q _distinct = False # type: bool _aggregate = False # type: list _limit = None # type: int _custom_sql = None # type: str _custom_args = None # type: list model = None # type: BaseUtilityModel_T def __init__(self, model: BaseUtilityModel_T = None): self.model = model def clone(self, **kwargs): """ Creates a copy of the current instance. The 'kwargs' parameter can be used by clients to update attributes after copying has taken place. 
""" clone = self.__class__(model=self.model) # Clone our underscore properties for k, v in self.__dict__.items(): if k.startswith('_'): clone.__dict__[k] = self.__dict__[k] clone.__dict__.update(kwargs) return clone def set_distinct(self): self._distinct = True def set_aggregate(self, *args): self._aggregate = list() for arg in args: self._aggregate.append(str(arg)) def set_fields(self, fields): self._fields = fields def set_group_by(self, fields): self._group_by = fields def set_order_by(self, fields): self._order_by = fields def set_limit(self, limit): if not isinstance(limit, int) or limit < 0: raise ValueError('Invalid value for LIMIT statement') self._limit = limit def add_q(self, negate, *args, **kwargs): """ Add Q objects to self._where :param negate: Invert the Q object :param args: list of Q objects :param kwargs: Key/Value dictionary """ if not args and not kwargs: raise ValueError('No filter arguments provided') q_list = list() if kwargs: for key in kwargs: q_list.append(Q(key, QOper.O_EQUAL, kwargs[key])) else: for q_object in list(args): q_list.append(q_object) for q_object in q_list: if negate: q_object = ~q_object if not self._where: self._where = q_object else: self._where.add(q_object) def to_sql(self) -> (str, list): """ Generate the SQL statment and return it :return: Tuple containing SQL statement and arguments. """ sql, args = self._get_sql_query() return sql, args def _get_sql_query(self): """ Generate a parameterized sql statment and args list # TODO: Move this to the Providers :return: SQL statment, args list """ try: db_table = self.model.Meta.db_table if not db_table: raise ModelError('db_table not defined in Model Meta class') except Exception: raise ModelError('db_table not defined in Model Meta class') distinct = '' if self._distinct is False else ' DISTINCT' # Setup SELECT fields fields = '' if self._fields and len(self._fields) > 0: fields += ', '.join(self._fields) if not fields and not self._aggregate: fields = '*' if self._aggregate: if fields: fields += ', ' + ', '.join(self._aggregate) else: fields = ', '.join(self._aggregate) # Setup SELECT WHERE clause where = '' args = None if self._where: where = ' WHERE{0}'.format(str(self._where)) args = self._where.get_args() # Setup SELECT GROUP BY clause if not self._group_by or len(self._group_by) == 0: group_by = '' else: group_by = ' GROUP BY {0}'.format(', '.join(self._group_by)) # Setup SELECT ORDER BY clause if not self._order_by or len(self._order_by) == 0: order_by = '' else: order_by = ' ORDER BY {0}'.format(', '.join(self._order_by)) if self._limit is None: limit = '' else: limit = ' LIMIT {0}'.format(self._limit) # Build SQL query here sql = 'SELECT{0} {1} FROM {2}{3}{4}{5}{6}'.format(distinct, fields, db_table, where, group_by, order_by, limit) sql = sql.replace(Q.placeholder, self.model.get_db_conn().placeholder) # TODO: Future: Figure out the best location to set the correct argument value placeholder. # TODO: Future: Right now we are defaulting to '?' for the argument value placeholder. 
return sql, args def run_query(self, db_conn: BaseDBConnection) -> list: """ Make the database query now :return: list of ModelBase objects populated :rtype: list[BaseUtilityModel_T] """ if not db_conn: raise TypeError('db_conn parameter must be active BaseDBConnection object') if not db_conn.db_connected(): raise ConnectionError('BaseDBConnection object is not connected to a database') if not self._custom_sql: sql, args = self._get_sql_query() else: sql = self._custom_sql args = self._custom_args if args: records = db_conn.db_exec_stmt(sql, args) else: records = db_conn.db_exec_stmt(sql) results = list() if records: # TODO: Future: maybe change this to a fetchmany() and return a smaller set each time like django does for record in records: model = self.model.__class__(db_conn, record) results.append(model) return results def count(self, db_conn) -> int: """ Return the number of records in the table # TODO: Move this to the Providers :return: record count """ if not self._custom_sql: sql = "SELECT count(1) AS count from {0}".format(self.model.Meta.db_table) args = None else: sql = "SELECT count(1) as count from ({0})".format(self._custom_sql) args = self._custom_args if args: record = db_conn.db_exec_stmt(sql, args) else: record = db_conn.db_exec_stmt(sql) if record: return int(record[0]['count'] if isinstance(record, list) else record['count']) return 0 class BaseQuerySet(object): """ This is the interface for accessing the underlying database """ _filter = None # type: str _result_cache = None # type: list _db_conn = None # type: BaseDBConnection _group_by = None # type: list _order_by = None # type: list query = None # type: BaseQuery model = None # type: BaseUtilityModel_T def __init__(self, db_conn: BaseDBConnection, model: BaseUtilityModel_T=None, query: BaseQuery=None): if not isinstance(model, BaseTableModel): raise ModelRequired('model parameter must be a ModelBase object') self._db_conn = db_conn self.model = model self.query = query or BaseQuery(self.model) def get_db_conn(self) -> BaseDBConnection: """ Return the databsae connection object. :return: Database connection object. """ return self._db_conn def _clone(self, **kwargs) -> "BaseQuerySet": """ Creates a copy of the current instance. The 'kwargs' parameter can be used by clients to update attributes after copying has taken place. """ query = self.query.clone() clone = self.__class__(db_conn=self._db_conn, model=self.model, query=query) # Clone our underscore properties for k, v in self.__dict__.items(): if k.startswith('_'): clone.__dict__[k] = self.__dict__[k] clone.query = query clone.__dict__.update(kwargs) return clone # def __deepcopy__(self, memo): # """ # Deep copy of a QuerySet doesn't populate the cache # """ # obj = self.__class__(self._db_conn, self.model, self.query) # for k, v in self.__dict__.items(): # if k == '_result_cache': # obj.__dict__[k] = None # else: # obj.__dict__[k] = copy.deepcopy(v, memo) # return obj def __getstate__(self): # Force the cache to be fully populated. self._fetch_all() obj_dict = self.__dict__.copy() return obj_dict def __setstate__(self, state): self.__dict__.update(state) def __repr__(self): return super(BaseQuerySet, self).__repr__() pass # data = list(self[:REPR_OUTPUT_SIZE + 1]) # if len(data) > REPR_OUTPUT_SIZE: # data[-1] = "...(remaining elements truncated)..." # return '<%s %r>' % (self.__class__.__name__, data) def __len__(self): self._fetch_all() return len(self._result_cache) def __iter__(self): """ Populate the cache now """ self._fetch_all() return
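# A short usage sketch of the Q combinators above: __or__/__and__ chain nodes through
# child/child_connector, __str__ renders the parameterized clause, and get_args() returns
# the bound values in placeholder order. Field names and values here are illustrative; the
# exact rendered text depends on the QOper/QConn enum values and the Q.placeholder token
# defined elsewhere in this module.
q = Q("status", QOper.O_EQUAL, "failed") | Q("retries", QOper.O_BETWEEN, 3, 10)

where_clause = str(q)   # roughly: " status = ? OR retries BETWEEN ? AND ?"
params = q.get_args()   # ["failed", 3, 10] -- in the order the placeholders appear

# Negation is tracked per node and rendered by __str__, e.g. IS NULL becomes IS NOT NULL.
not_deleted = ~Q("deleted_at", QOper.O_IS_NULL)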
#!/usr/bin/env python # # Copyright 2016 MIT Lincoln Laboratory, Massachusetts Institute of Technology # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use these files except in compliance with # the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # """ Authors: <NAME> Date: April 30, 2015 Installation: Python 2.7 on Windows 7 Description: This script is an example of using pyTweet to collect profiles, timelines, friends and followers from the Official Twitter API. """ from pyTweet import pyTweet import datetime, os def main(): # PARAMETERS # Enter proxy host and port information host = '' port = '' # Load your seed of Twitter handles into the list username_seed below. username_seed = ['username1', 'username2'] userid_seed = [123, 12345] # Start date for timeline collection: Collect tweets starting with current date back until the day specified below timeline_start_date = datetime.date(year=2015, month=3, day=1) # Sample tweet ID twt_id = 1234567890 # List of links to photos and videos on Twitter download_media_links = [] # must have extensions 'gif', 'jpg', 'jpeg', 'jif', 'jfif', 'tif', 'tiff', 'png', 'pdf', or 'mp4' # Directory to save media media_save_dir = 'directory to save media' # Coordinates for Boston BOS = (42.3601, 71.0589) ## # AUTHENTICATE INTO THE OFFICIAL TWITTER API # Create proxies dictionary proxies = {'http': 'http://%s:%s' % (host, port), 'https': 'http://%s:%s' % (host, port)} # Load twitter keys twitter_keys = pyTweet.load_twitter_api_key_set() # API authorization OAUTH = pyTweet.get_authorization(twitter_keys) ## # CHECK KEYS # This function checks all of your saved Twitter API key JSON files to see if they can be used for collection pyTweet.check_twitter_key_functionality(host=host, port=port) ## # LOOK UP PROFILE INFORMATION # Returns a list of fully-hydrated user dictionaries, as specified by comma-separated values passed to the user_id # and/or screen_name parameters. # # There are a few things to note when using this method. # * You must be following a protected user to be able to see their most recent status update. If you don't follow a # protected user their status will be removed. # * The order of user IDs or screen names may not match the order of users in the returned array. # * If a requested user is unknown, suspended, or deleted, then that user will not be returned in the results list. # * If none of your lookup criteria can be satisfied by returning a user object, a HTTP 404 will be thrown. # # PARAMETERS: # ----------- # screen_names: List of screen names, or a single one - optional # user_ids: List of user IDs, or a single one - optional # include_entities: The entities node that may appear within embedded statuses will be disincluded when set to false. user_info = pyTweet.lookup_users(proxies=proxies, auth=OAUTH, screen_names=username_seed, user_ids=userid_seed, include_entities=True) print user_info ## # SEARCH FOR USER PROFILES # Search for users after providing a string query (or list of string queries). Up to 1000 users can be returned. # The exclusive command indicates OR or AND usage in the query. 
# # PARAMETERS: # -------- # q: Search term query, must be a string object or list of string objects # exclusive: Boolean, if True, search query terms with ORs rather than ANDs. Default is False # limit: limit to number of users to collect. Maximum and default values are 1000 # include_entities: The entities node will be disincluded from embedded tweet objects when set to false. user_search_results1 = pyTweet.search_users(q="Twitter API", proxies=proxies, auth=OAUTH, limit=1000, exclusive=False) print "\nSearch result 1: ", user_search_results1 user_search_results2 = pyTweet.search_users(q=['hangry', 'hippo'], proxies=proxies, auth=OAUTH, limit=1000, exclusive=True) print "\nSearch result 2: ", user_search_results2 ## # LOOK UP TIME LINES # Find timeline of a user occuring after start_date, either from a screen name or user ID. User timelines belonging # to protected users may only be requested when the authenticated user either 'owns' the timeline or is an approved # follower of the owner. The timeline returned is the equivalent of the one seen when you view a user's profile on # twitter.com. This method can only return up to 3,200 of a user's most recent Tweets. Native retweets of other # statuses by the user is included in this total, regardless of whether include_rts is set to false when requesting # this resource. # # PARAMETERS: # ----------- # user_id: The ID of the user for whom to return results for. # screen_name: The screen name of the user for whom to return results for. # trim_user: When set to true, each tweet returned in a timeline will include a user object including only the # status authors numerical ID. Omit this parameter to receive the complete user object. # exclude_replies: This boolean parameter will prevent replies from appearing in the returned timeline. Using # exclude_replies will mean you will receive up-to count tweets # contributor_details: This boolean parameter enhances the contributors element of the status response to # include the screen_name of the contributor. By default only the user_id of the contributor is included. # include_rts: When set to false, the timeline will strip any native retweets (though they will still count toward # both the maximal length of the timeline and the slice selected by the count parameter). Note: If you're # using the trim_user parameter in conjunction with include_rts, the retweets will still contain a full # user object. # :param start_date: start of timeline segment to collect, this is a datetime.date object. The default value is 52 # weeks ago from today tl1 = pyTweet.get_timeline(proxies=proxies, auth=OAUTH, start_date=timeline_start_date, user_id=userid_seed[0]) print "\nTimeline for user ID: {}".format(userid_seed[0]) print tl1 tl2 = pyTweet.get_timeline(proxies=proxies, auth=OAUTH, start_date=timeline_start_date, screen_name=username_seed[0]) print "\nTimeline for user screen name: {}".format(username_seed[0]) print tl2 ## # LOOK UP FRIENDS AND FOLLOWERS # There are two categories of API calls to collect friends and followers: # 1. Get lists of friends/follower IDs # 2. Get lists of friend/follower profile dictionaries # # Option 1 returns lists of IDs, and option 2 returns lists of profile dictionaries. These calls also vary on how # much information is returned per API call. You get 5,000 friend/follower IDs per call and 15 calls in a 15 minute # window. The later calls returning full profiles offer 20 users per call, and 15 calls in a 15 minute window. 
# # PARAMETERS: # ----------- # user_id: The ID of the user for whom to return results for - optional # screen_name: The screen name of the user for whom to return results for - optional # limit: limit to number of friends to collect. Set to None to get all friends. this is the default # skip_status: When set to either true, t or 1 statuses will not be included in the returned user objects # include_user_entities: The user object entities node will be disincluded when set to false. # # Get lists of friend and follower IDs friend_ids_list = pyTweet.get_user_friends(user_id=userid_seed[0], proxies=proxies, auth=OAUTH, limit=None) print "\nFriend IDs: ", friend_ids_list follower_ids_list = pyTweet.get_user_followers(user_id=userid_seed[0], proxies=proxies, auth=OAUTH, limit=200) print "\nFollower IDs: ", follower_ids_list # Get lists of friend and follower profile dictionaries. friend_profile_list = pyTweet.get_user_friend_profiles(screen_name=username_seed[0], proxies=proxies, auth=OAUTH, limit=50, include_user_entities=True) print "\nFriend profiles: ", friend_profile_list follower_profile_list = pyTweet.get_user_follower_profiles(screen_name=username_seed[1], proxies=proxies, auth=OAUTH, limit=70) print "\nFollower profiles: ", follower_profile_list ## # SEARCH FOR TWEETS # Return a list of tweets (or just one) with the following function # # PARAMETERS: # ----------- # tweet_id: Unique ID of tweet, or list of tweet IDs # include_entities: The entities node that may appear within embedded statuses will be disincluded when set to false. # trim_user: When set to
<reponame>atreebangalore/jaltol import os #import numpy as np #import pandas as pd #import matplotlib.pyplot as plt #from matplotlib.figure import Figure #from matplotlib import axes from pathlib import Path import sys import time import json import math import inspect import qgis.core from qgis.core import ( Qgis, QgsProject, QgsLayout, QgsLayoutExporter, QgsReadWriteContext, QgsVectorLayer, QgsFeature, QgsFeatureRequest, QgsExpression, QgsWkbTypes ) from qgis.gui import * from PyQt5.QtWidgets import QAction, QFileDialog, QDockWidget,QMenu from PyQt5.QtGui import QIcon,QPixmap from PyQt5.QtCore import Qt,QSignalMapper #contains Qt.BrushStyle from PyQt5.QtXml import QDomDocument from datetime import datetime from .jaltol_dialog import JaltolDockWidget import ee from ee_plugin import Map #from vectors import getLocalBoundaries #print(dir(getLocalBoundaries)) # functions within module aren't registering.. dunno why print(ee.String('Hello World from EE!').getInfo()) inspect_getfile = inspect.getfile(inspect.currentframe()) cmd_folder = os.path.split(inspect.getfile(inspect.currentframe()))[0] home_dir = os.path.join(os.path.expanduser('~')) class JaltolClass: def __init__(self, iface): # give plugin class access to QgisInterface self.iface = iface def initGui(self): icon = os.path.join(os.path.join(cmd_folder, 'logo.png')) # make path for logo image self.action = QAction(QIcon(icon), '&Jaltol', self.iface.mainWindow()) # create QAction self.action.triggered.connect(self.run) # if QAction triggered, run self.iface.addPluginToMenu('&Jaltol', self.action) # add action to menu self.iface.addToolBarIcon(self.action) self.first_start = True def unload(self): self.iface.removeToolBarIcon(self.action) # remove action from tool bar self.iface.removePluginMenu('&Jaltol', self.action) # remove action from plugin menu del self.action ########################################################################## ## AOI ## ########################################################################## def select_output_file(self,index): if index==0: print("index is 0") print("called select file for villages") self.dlg.lineE = self.dlg.lineEdit_aoi_selectVB elif index==1: print("index is 1") print("called select file for watersheds") self.dlg.lineE = self.dlg.lineEdit_aoi_selectWB elif index==2: print("index is 2") print("called select file for custom boundaries") self.dlg.lineE = self.dlg.lineEdit_aoi_selectCB else: pass filename, _filter = QFileDialog.getOpenFileName( # use file dialog to get filename self.dlg, "Select shape file ","", '*.shp') self.dlg.lineE.setText(filename) print("line Edit populated") #self.add_boundary_layer() def add_boundary_layer(self,index): print("called add_boundary_layer") self.filename = self.dlg.lineE.text() if len(self.filename)==0: self.iface.messageBar().pushMessage('Please select the correct shapefile', level=Qgis.Critical, duration=10) return if index==0: self.layername = 'Village Boundaries' self.vlayer = QgsVectorLayer(self.filename, self.layername, "ogr") elif index==1: self.layername = 'Watershed Boundaries' elif index==2: self.layername = 'Custom Boundaries' else: pass print("layer name: ", self.layername) self.newlayer = QgsVectorLayer(self.filename, self.layername, "ogr") self.project.addMapLayer(self.newlayer) alayer = self.iface.activeLayer() single_symbol_renderer = alayer.renderer() symbol = single_symbol_renderer.symbol() symbol.symbolLayer(0).setBrushStyle(Qt.BrushStyle(Qt.NoBrush)) if index==0: self.get_states() else: pass def get_states(self): print("called get 
States") idx = self.vlayer.fields().indexOf('State_N') li = sorted(self.vlayer.uniqueValues(idx)) if qgis.core.NULL in li: li.remove(qgis.core.NULL) self.dlg.comboBox_aoi_selectS.clear() self.dlg.comboBox_aoi_selectS.addItems(li) def get_districts(self): print("called get Districts") self.state = self.dlg.comboBox_aoi_selectS.currentText() self.stateFilter = "\"State_N\"='" + self.state + "'" self.vlayerFilter = self.stateFilter #print(self.vlayerFilter) expr = QgsExpression(self.vlayerFilter) stateFeas = self.vlayer.getFeatures(QgsFeatureRequest(expr)) print(stateFeas) li = [] for fea in stateFeas: if (fea['Dist_N']==None): li.append("BLANK") else: li.append(fea['Dist_N']) li = sorted(set(li)) self.dlg.comboBox_aoi_selectD.clear() self.dlg.comboBox_aoi_selectD.addItems(li) def get_blocks(self): print("called get Block") self.dist = self.dlg.comboBox_aoi_selectD.currentText() print("of district: ",self.dist) self.distFilter = "\"Dist_N\"='" + self.dist + "'" self.vlayerFilter = self.stateFilter + " and " + self.distFilter #print(self.vlayerFilter) expr = QgsExpression(self.vlayerFilter) distFeas = self.vlayer.getFeatures(QgsFeatureRequest(expr)) li = [] for fea in distFeas: if (fea['SubDist_N']==None): li.append("BLANK") else: li.append(fea['SubDist_N']) li = sorted(set(li)) print("no of blocks:",len(li)) self.dlg.comboBox_aoi_selectB.clear() self.dlg.comboBox_aoi_selectB.addItems(li) def get_villages(self): print("called get Villages") self.block = self.dlg.comboBox_aoi_selectB.currentText() self.blockFilter = "\"SubDist_N\"='" + self.block + "'" self.vlayerFilter = self.stateFilter + " and " + self.distFilter + " and " + self.blockFilter expr = QgsExpression(self.vlayerFilter) blockFeas = self.vlayer.getFeatures(QgsFeatureRequest(expr)) li = [] for fea in blockFeas: if (fea['VCT_N']==None): li.append("BLANK") else: li.append(str(fea['VCT_N'])) li = sorted(set(li)) self.dlg.comboBox_aoi_selectV.clear() self.dlg.comboBox_aoi_selectV.addItems(li) def select_village(self): print("called select village") self.vlg = self.dlg.comboBox_aoi_selectV.currentText() self.vlgFilter = "\"VCT_N\"='"+self.vlg+"'" self.vlayerFilter = self.stateFilter + " and " + self.distFilter + " and " + self.blockFilter + " and " + self.vlgFilter self.vlayerFilter = self.vlayerFilter # "\""+ self.vlayerFilter.replace("\"","\\\"") +"\"" print(self.vlayerFilter) print(self.iface.activeLayer()) # expr = QgsExpression(self.vlayerFilter) #print("is expression valid: ",checkExpression(expr)) self.iface.activeLayer().selectByExpression(self.vlayerFilter) def zoom_village(self): print("called zoom to village") self.select_village() print(self.iface.setActiveLayer(self.vlayer)) print(self.vlayer, self.iface.activeLayer()) self.iface.actionZoomToSelected().trigger() print("zoom to Village triggered","\n") ########################################################################## ## Water Balance Layers ## ########################################################################## #### POPULATE UI #### def populate_lulc_choices(self): li = ["2000","2015","2016","2017","2018","2019"] self.dlg.comboBox_5.clear() self.dlg.comboBox_5.addItems(sorted(set(li))) def populate_layer_list(self,layer): if layer=='rain': start,end=(2000,2021) misc = [] elif layer=='et': start,end=(2003,2021) misc = [] elif layer=='sm': start,end=(2015,2021) misc = [] elif layer=='groundwater': start,end=(1996,2016) misc = ['2020'] elif layer=='surfacewater': start,end=(2000,2021) misc = [] elif layer=='wbyear': start,end=(2000,2017) misc = ['2020'] else: 
print("incorrect layer name provided to populate layer list") li = list(range(start,end)) years = [str(yr) for yr in li] + misc if layer=='rain': self.dlg.comboBox_9.clear() self.dlg.comboBox_9.addItems(sorted(years)) elif layer=='et': self.dlg.comboBox_11.clear() self.dlg.comboBox_11.addItems(sorted(years)) elif layer=='sm': self.dlg.comboBox_13.clear() self.dlg.comboBox_13.addItems(sorted(years)) elif layer=='groundwater': self.dlg.comboBox_12.clear() self.dlg.comboBox_12.addItems(sorted(years)) elif layer=='surfacewater': self.dlg.comboBox_10.clear() self.dlg.comboBox_10.addItems(sorted(years)) elif layer=='wbyear': self.dlg.comboBox_6.clear() self.dlg.comboBox_6.addItems(sorted(years)) else: print("incorrect layer name provided to populate layer list") #### DEFINE LAYERS #### def make_lulc_image(self): geeAssetString = 'users/cseicomms/lulc_13class/KA_' + str(int(self.lulc_yr)+1) print(geeAssetString) self.lulc = ee.Image(geeAssetString) print(type(self.lulc)) def make_rain_image(self): rainColl = ee.ImageCollection("users/cseicomms/rainfall_imd") start = ee.Date.fromYMD(int(self.rain_year),6,1) end = ee.Date.fromYMD(int(self.rain_year)+1,5,31) self.rain = rainColl.filterDate(start,end).sum() print(type(self.rain)) def make_et_image(self): geeAssetString = 'users/cseicomms/evapotranspiration_ssebop/wy' + self.et_year self.et = ee.Image(geeAssetString) print(type(self.et)) def make_sw_image(self): print(type(self.sw_year),self.sw_year) y1str = 'users/cseicomms/surfacewater/preMonsoonVolume/' + self.sw_year y2str = 'users/cseicomms/surfacewater/preMonsoonVolume/' + str(int(self.sw_year) + 1) year1 = ee.Image(y1str) year2 = ee.Image(y2str) print(y1str,y2str) y1unmask = year1.subtract(year1) y2unmask = year2.subtract(year2) self.sw = year2.unmask(y1unmask).subtract(year1.unmask(y2unmask)) print(type(self.sw)) def make_gw_image(self): print(type(self.gw_year),self.gw_year) rcstr = 'users/cseicomms/groundwater/recharge/' + self.gw_year dcstr = 'users/cseicomms/groundwater/discharge/' + self.gw_year systr = 'users/cseicomms/groundwater/sy_mean_cgwb' rc = ee.Image(rcstr) dc = ee.Image(dcstr) self.sy = ee.Image(systr).divide(100) print(rcstr,dcstr,systr) self.gw = rc.subtract(dc).multiply(1000).multiply(self.sy) print(type(self.gw)) # new code # y1str = 'users/cseicomms/groundwater/may_' + self.gw_year # y2str = 'users/cseicomms/groundwater/may_' + str(int(self.gw_year) + 1) # systr = 'users/cseicomms/groundwater/sy_mean_cgwb' # year1 = ee.Image(y1str) # year2 = ee.Image(y2str) # self.sy = ee.Image(systr).divide(100) # print(y1str,y2str,systr) # self.gw = year1.subtract(year2).multiply(100).multiply(self.sy) # print(type(self.gw)) def make_sm_image(self): smColl = ee.ImageCollection("NASA_USDA/HSL/SMAP10KM_soil_moisture"); print(type(self.sm_year),self.sm_year) year = int(self.sm_year) myFilter = ee.Filter.And(ee.Filter.calendarRange(year,year,'year'),ee.Filter.calendarRange(5,5,'month')) year1 = smColl.filter(myFilter).select('susm').median() year = int(self.sm_year) + 1 myFilter = ee.Filter.And(ee.Filter.calendarRange(year,year,'year'),ee.Filter.calendarRange(5,5,'month')) year2 = smColl.filter(myFilter).select('susm').median() self.sm = year2.subtract(year1) print(type(self.sm)) #### ADD LAYERS TO MAP #### def add_lulc_image(self): self.lulc_yr = self.dlg.comboBox_5.currentText() self.make_lulc_image() paletteLULC = ['02451E','06FC6D','FC0D06','28B505','750776','C713A9','C713A9', 'C713A9','E27FF9','E27FF9','E27FF9','765904','765904','765904', 
'EAB828','EAB828','EAB828','092CEE','09EECB','Grey','Black'] lulc_label = 'lulc_' + self.lulc_yr Map.addLayer(self.lulc, {'palette': paletteLULC, 'min': 0, 'max': 20}, lulc_label, True) Map.centerObject(self.lulc,10) def add_rain_image(self): self.rain_year = int(self.dlg.comboBox_9.currentText()) self.make_rain_image() paletteRain = ['ff0','fff','00f'] rainViz = {'min':400,'max':2000,'palette':paletteRain} rain_label = 'rain_' + str(self.rain_year) Map.addLayer(self.rain, rainViz, rain_label, True) self.rain = None self.rain_year = None self.project.setCrs(self.crs) print("crs set to 3857") def add_et_image(self): self.et_year = self.dlg.comboBox_11.currentText() self.make_et_image() et_label = 'et_' + self.et_year Map.addLayer(self.et,{'min':300,'max':1500},et_label,True) self.et = None self.et_year = None def add_sw_image(self): self.sw_year = self.dlg.comboBox_10.currentText() self.make_sw_image() paletteSW = ['#f00','#000','#00f'] swViz = {'min':-80,'max':80,'palette':paletteSW} sw_label = 'sw_' + self.sw_year Map.addLayer(self.sw,swViz,sw_label,True) self.sw = None self.sw_year = None def add_gw_image(self): self.gw_year = self.dlg.comboBox_12.currentText() self.make_gw_image() paletteGW = ['#f00','#fff','#0f0'] gwViz = {'min':-80,'max':80,'palette':paletteGW} gw_label = 'gw_' + self.gw_year Map.addLayer(self.gw,gwViz,gw_label,True) self.gw = None self.gw_year = None def add_sm_image(self): self.sm_year = self.dlg.comboBox_13.currentText() self.make_sm_image() paletteSM = ['#f00','#fff','#0f0'] smViz = {'min':-80,'max':80,'palette':paletteSM} sm_label = 'sm_' + self.sm_year Map.addLayer(self.sm,smViz,sm_label,True) self.sm = None self.sm_year = None #### CALC WATER BALANCE VALUES #### def calc_rain_value(self): try: self.rain_value = round(self.rain.reduceRegion(ee.Reducer.median(),self.polygon,100).getInfo()['b1']) self.rain_str = str(self.rain_value) + ' mm' print("rain value(mod): ", self.rain_str) except Exception as e: print(e) print(self.rain_year + " " + "rainfall image not found") self.rain_value = math.nan self.rain_str = "NA" def calc_et_value(self): try: self.et_value = round(self.et.reduceRegion(ee.Reducer.median(),self.polygon,100).getInfo()['b1']) self.et_str = str(self.et_value) + ' mm' print("et value: ", self.et_str) except Exception as e: print(e) print(self.et_year + " " + "et image not found") self.et_value = math.nan self.et_str = "NA" def calc_sw_value(self): try: self.sw_vol_value = self.sw.reduceRegion(ee.Reducer.sum(),self.polygon).getInfo()['Volume'] print("sw volume (in m3): ",self.sw_vol_value,type(self.sw_vol_value)) self.sw_value_in_mm = self.sw_vol_value / self.polygon_area * 1000 self.sw_str = str(round(self.sw_value_in_mm)) + ' mm' print("sw value: ", self.sw_str) except Exception as e: print(e) print(self.sw_year + " " + "surface water image not found") self.sw_value_in_mm = math.nan self.sw_str = "NA" def calc_gw_value(self): try: self.gw_value = round(self.gw.reduceRegion(ee.Reducer.median(),self.polygon,100).getInfo()['b1']) self.gw_str = str(self.gw_value) + ' mm' print("gw value: ", self.gw_str) except Exception as e: print(e) print(self.gw_year + " " + "groundwater image not found") self.gw_value = math.nan self.gw_str = "NA" def calc_sm_value(self): try: self.sm_value = round(self.sm.reduceRegion(ee.Reducer.median(),self.polygon,100).getInfo()['susm']) self.sm_str = str(self.sm_value) + ' mm' print("sm value: ", self.sm_str) except Exception as e: print(e) print(self.sm_year + " " + "soil moisture image not found") self.sm_value = math.nan 
self.sm_str = "NA" def calc_ro_value(self): rhs = [self.et_value,self.sm_value,self.gw_value,self.sw_value_in_mm] rhsnonan = [x for x in rhs if math.isnan(x) == False] rhssum = sum(rhsnonan) print(f"sum of outputs is {rhssum}") if self.rain_value >= rhssum: self.ro_value = round(self.rain_value - rhssum) self.ro_str = str(self.ro_value) + ' mm' print("ro value: ",self.ro_str) else: self.ro_value = 0 self.ro_str = "0 mm" def calc_vill_area(self): self.select_village() vill = self.vlayer.getSelectedFeatures() def calc_water_balance(self): self.rain_year = self.et_year = self.gw_year = self.sw_year = self.sm_year = self.dlg.comboBox_6.currentText() self.wb_year = self.dlg.comboBox_6.currentText() self.wb_next_year = str(int(self.wb_year) + 1)[2:] self.make_rain_image() self.calc_rain_value() # change 'calc' to 'reduce' self.make_et_image() self.calc_et_value() self.make_sw_image() self.calc_sw_value() self.make_gw_image() self.calc_gw_value() self.make_sm_image() self.calc_sm_value() self.calc_ro_value()
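# ----------------------------------------------------------------------
# Hedged sketch (not part of the plugin): the runoff closure implemented in
# calc_ro_value() above, reproduced with plain numbers so the water-balance
# identity is explicit. All values are in mm over the selected village and
# are purely illustrative.
# ----------------------------------------------------------------------
import math

def residual_runoff(rain_mm, output_components_mm):
    """Runoff = rainfall minus the sum of the known outputs (ET, delta soil
    moisture, delta groundwater, delta surface water). NaN components are
    skipped and the result is clamped at zero, mirroring calc_ro_value()."""
    known = [v for v in output_components_mm if not math.isnan(v)]
    return max(round(rain_mm - sum(known)), 0)

# Example: 850 mm of rain, 560 mm ET, +20 mm soil moisture, a missing
# groundwater layer (NaN) and +35 mm surface water -> 235 mm residual runoff.
# residual_runoff(850, [560.0, 20.0, float('nan'), 35.0]) == 235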
<filename>train_rguo/train_code/predict_rguo_part2.py from __future__ import print_function, division, absolute_import import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader import numpy as np import pandas as pd from tqdm import tqdm import os import cv2 import skimage.io import openslide import torchvision.models as models import torch.utils.model_zoo as model_zoo import types import re from collections import OrderedDict import math from torch.utils import model_zoo from torch.utils.checkpoint import checkpoint,checkpoint_sequential torch.backends.cudnn.benchmark = False class SEModule(nn.Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class Bottleneck(nn.Module): """ Base class for bottlenecks that implements `forward()` method. """ def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out = self.se_module(out) + residual out = self.relu(out) return out class SEBottleneck(Bottleneck): """ Bottleneck for SENet154. """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes * 2) self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes * 4) self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBottleneck(Bottleneck): """ ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe implementation and uses `stride=stride` in `conv1` and not in `conv2` (the latter is used in the torchvision implementation of ResNet). """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNeXtBottleneck(Bottleneck): """ ResNeXt bottleneck type C with a Squeeze-and-Excitation module. 
""" expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4): super(SEResNeXtBottleneck, self).__init__() width = math.floor(planes * (base_width / 64)) * groups self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1) self.bn1 = nn.BatchNorm2d(width) self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(width) self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SENet(nn.Module): def __init__(self, block, layers, groups, reduction, dropout_p=0.2, inplanes=128, input_3x3=True, downsample_kernel_size=3, downsample_padding=1, num_classes=1000): """ Parameters ---------- block (nn.Module): Bottleneck class. - For SENet154: SEBottleneck - For SE-ResNet models: SEResNetBottleneck - For SE-ResNeXt models: SEResNeXtBottleneck layers (list of ints): Number of residual blocks for 4 layers of the network (layer1...layer4). groups (int): Number of groups for the 3x3 convolution in each bottleneck block. - For SENet154: 64 - For SE-ResNet models: 1 - For SE-ResNeXt models: 32 reduction (int): Reduction ratio for Squeeze-and-Excitation modules. - For all models: 16 dropout_p (float or None): Drop probability for the Dropout layer. If `None` the Dropout layer is not used. - For SENet154: 0.2 - For SE-ResNet models: None - For SE-ResNeXt models: None inplanes (int): Number of input channels for layer1. - For SENet154: 128 - For SE-ResNet models: 64 - For SE-ResNeXt models: 64 input_3x3 (bool): If `True`, use three 3x3 convolutions instead of a single 7x7 convolution in layer0. - For SENet154: True - For SE-ResNet models: False - For SE-ResNeXt models: False downsample_kernel_size (int): Kernel size for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 3 - For SE-ResNet models: 1 - For SE-ResNeXt models: 1 downsample_padding (int): Padding for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 1 - For SE-ResNet models: 0 - For SE-ResNeXt models: 0 num_classes (int): Number of outputs in `last_linear` layer. - For all models: 1000 """ super(SENet, self).__init__() self.inplanes = inplanes if input_3x3: layer0_modules = [ ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False)), ('bn1', nn.BatchNorm2d(64)), ('relu1', nn.ReLU(inplace=True)), ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), ('bn2', nn.BatchNorm2d(64)), ('relu2', nn.ReLU(inplace=True)), ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), ('bn3', nn.BatchNorm2d(inplanes)), ('relu3', nn.ReLU(inplace=True)), ] else: layer0_modules = [ ('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), ('bn1', nn.BatchNorm2d(inplanes)), ('relu1', nn.ReLU(inplace=True)), ] # To preserve compatibility with Caffe weights `ceil_mode=True` # is used instead of `padding=1`. 
layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True))) self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) self.layer1 = self._make_layer( block, planes=64, blocks=layers[0], groups=groups, reduction=reduction, downsample_kernel_size=1, downsample_padding=0 ) self.layer2 = self._make_layer( block, planes=128, blocks=layers[1], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.layer3 = self._make_layer( block, planes=256, blocks=layers[2], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.layer4 = self._make_layer( block, planes=512, blocks=layers[3], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.avg_pool = nn.AvgPool2d(7, stride=1) self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None self.last_linear = nn.Linear(512 * block.expansion, num_classes) def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, downsample_kernel_size=1, downsample_padding=0): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, stride=stride, padding=downsample_padding, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, groups, reduction, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, groups, reduction)) return nn.Sequential(*layers) def features(self, x): x = self.layer0(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def features_ckpt(self, x): x.requires_grad = True x = checkpoint(self.layer0, x, preserve_rng_state=False) x = checkpoint_sequential(self.layer1, 3, x, preserve_rng_state=False) x = checkpoint_sequential(self.layer2, 4, x, preserve_rng_state=False) x = checkpoint_sequential(self.layer3, 6, x, preserve_rng_state=False) x = checkpoint_sequential(self.layer4, 3, x, preserve_rng_state=False) return x def logits(self, x): x = self.avg_pool(x) if self.dropout is not None: x = self.dropout(x) x = x.view(x.size(0), -1) x = self.last_linear(x) return x def forward(self, x): x = self.features(x) x = self.logits(x) return x def se_resnext50_32x4d(num_classes=1000, pretrained='imagenet'): model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=num_classes) return model """ This file contains helper functions for building the model and for loading model parameters. These helper functions are built to mirror those in the official TensorFlow implementation. 
""" import re import math import collections from functools import partial import torch from torch import nn from torch.nn import functional as F from torch.utils import model_zoo ######################################################################## ############### HELPERS FUNCTIONS FOR MODEL ARCHITECTURE ############### ######################################################################## # Parameters for the entire model (stem, all blocks, and head) GlobalParams = collections.namedtuple('GlobalParams', [ 'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'num_classes', 'width_coefficient', 'depth_coefficient', 'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size']) # Parameters for an individual model block BlockArgs = collections.namedtuple('BlockArgs', [ 'kernel_size', 'num_repeat', 'input_filters', 'output_filters', 'expand_ratio', 'id_skip', 'stride', 'se_ratio']) # Change namedtuple defaults GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields) BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields) class SwishImplementation(torch.autograd.Function): @staticmethod def forward(ctx, i): result = i * torch.sigmoid(i) ctx.save_for_backward(i) return result @staticmethod def backward(ctx, grad_output): i = ctx.saved_variables[0] sigmoid_i = torch.sigmoid(i) return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i))) class MemoryEfficientSwish(nn.Module): def forward(self, x): return SwishImplementation.apply(x) class Swish(nn.Module): def forward(self, x): return x * torch.sigmoid(x) def round_filters(filters, global_params): """ Calculate and round number of filters based on depth multiplier. """ multiplier = global_params.width_coefficient if not multiplier: return filters divisor = global_params.depth_divisor min_depth = global_params.min_depth filters *= multiplier min_depth = min_depth or divisor new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor) if new_filters < 0.9 * filters: # prevent rounding by more than 10% new_filters += divisor return int(new_filters) def round_repeats(repeats, global_params): """ Round number of filters based on depth multiplier. """ multiplier = global_params.depth_coefficient if not multiplier: return repeats return int(math.ceil(multiplier * repeats)) def drop_connect(inputs, p, training): """ Drop connect. """ if not training: return inputs batch_size = inputs.shape[0] keep_prob = 1 - p # random_tensor = keep_prob random_tensor = torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device) + keep_prob binary_tensor = torch.floor(random_tensor) output = inputs / keep_prob * binary_tensor return output def get_same_padding_conv2d(image_size=None): """ Chooses static padding if you have specified an image size, and dynamic
import re import csv import gzip import json import logging import itertools import requests from typing import List, Tuple, Optional, Union from functools import lru_cache import xml from xml.etree import ElementTree from urllib.error import HTTPError from protmapper.resources import resource_manager, feature_from_json, Feature logger = logging.getLogger(__name__) uniprot_url = 'http://www.uniprot.org/uniprot/' xml_ns = {'up': 'http://uniprot.org/uniprot'} @lru_cache(maxsize=10000) def query_protein(protein_id: str) -> Union[ElementTree.ElementTree, None]: """Retrieve the XML entry for a given protein. Parameters ---------- protein_id : The UniProt ID of the protein to look up. Returns ------- : An ElementTree representation of the XML entry for the protein. """ # Try looking up a primary ID if the given one # is a secondary ID and strip off isoforms protein_id = get_primary_id(_strip_isoform(protein_id)) url = uniprot_url + protein_id + '.xml' try: # As opposed to the RDF endpoint, the XML endpoint returns # an identical entry for secondary accessions, for instance, # the response for the secondary ID A0A021WW06 is identical to # the response for the primary ID P40417. ret = requests.get(url) et = ElementTree.fromstring(ret.content) return et except Exception as e: return None def _strip_isoform(protein_id): return protein_id.split('-')[0] def _split_isoform(protein_id): parts = protein_id.split('-', maxsplit=1) protein_id = parts[0] isoform = None if len(parts) == 2: if re.match(r'\d+', parts[1]): isoform = parts[1] return protein_id, isoform def _reattach_isoform(pid, iso): if iso is not None: return '%s-%s' % (pid, iso) else: return pid def is_secondary(protein_id): """Return True if the UniProt ID corresponds to a secondary accession. Parameters ---------- protein_id : str The UniProt ID to check. Returns ------- True if it is a secondary accessing entry, False otherwise. """ entry = um.uniprot_sec.get(_strip_isoform(protein_id)) if not entry: return False return True def is_reviewed(protein_id): """Return True if the UniProt ID corresponds to a reviewed entry. Parameters ---------- protein_id : str The UniProt ID to check. Returns ------- True if it is a reviewed entry, False otherwise. """ return _strip_isoform(protein_id) in um.uniprot_reviewed def get_primary_id(protein_id): """Return a primary entry corresponding to the UniProt ID. Parameters ---------- protein_id : str The UniProt ID to map to primary. Returns ------- primary_id : str If the given ID is primary, it is returned as is. Otherwise the primary IDs are looked up. If there are multiple primary IDs then the first human one is returned. If there are no human primary IDs then the first primary found is returned. """ base_id, isoform = _split_isoform(protein_id) primaries = um.uniprot_sec.get(base_id) if primaries: if len(primaries) > 1: logger.debug('More than 1 primary ID for %s.' % base_id) for primary in primaries: # Often secondary IDs were broken into multiple primary IDs # for different organisms. In this case we return the human # one if it exists. if is_human(primary): return _reattach_isoform(primary, isoform) # If we haven't returned anything then we just return the # first primary id return _reattach_isoform(primaries[0], isoform) # If there is no secondary entry then we assume this is a primary entry return protein_id def get_family_members(family_name, human_only=True): """Return the HGNC gene symbols which are the members of a given family. Parameters ---------- family_name : str Family name to be queried. 
human_only : bool If True, only human proteins in the family will be returned. Default: True Returns ------- gene_names : list The HGNC gene symbols corresponding to the given family. """ data = {'query': 'family:%s' % family_name, 'format': 'list'} if human_only: data['fil'] = 'organism:human' res = requests.get(uniprot_url, params=data) if not res.status_code == 200 or not res.text: return None # res.text gets us the Unicode html = res.text protein_list = html.strip().split('\n') gene_names = [] for p in protein_list: gene_name = get_gene_name(p) gene_names.append(gene_name) return gene_names def get_mnemonic(protein_id, web_fallback=False): """Return the UniProt mnemonic for the given UniProt ID. Parameters ---------- protein_id : str UniProt ID to be mapped. web_fallback : Optional[bool] If True and the offline lookup fails, the UniProt web service is used to do the query. Returns ------- mnemonic : str The UniProt mnemonic corresponding to the given Uniprot ID. """ protein_id = get_primary_id(_strip_isoform(protein_id)) try: mnemonic = um.uniprot_mnemonic[protein_id] return mnemonic except KeyError: pass if not web_fallback: return None tree = query_protein(protein_id) if tree is None: return None mnemonic = tree.find('up:entry/up:name', namespaces=xml_ns) if mnemonic is None: return None return mnemonic.text def get_id_from_mnemonic(uniprot_mnemonic): """Return the UniProt ID for the given UniProt mnemonic. Parameters ---------- uniprot_mnemonic : str UniProt mnemonic to be mapped. Returns ------- uniprot_id : str The UniProt ID corresponding to the given Uniprot mnemonic. """ try: uniprot_id = um.uniprot_mnemonic_reverse[uniprot_mnemonic] return uniprot_id except KeyError: return None def get_gene_name(protein_id, web_fallback=True): """Return the gene name or canonical protein name for the given UniProt ID. If available, this function returns the primary gene name provided by UniProt. If not available, the primary protein name is returned. Parameters ---------- protein_id : str UniProt ID to be mapped. web_fallback : Optional[bool] If True and the offline lookup fails, the UniProt web service is used to do the query. Returns ------- gene_name : str The gene name corresponding to the given Uniprot ID. """ protein_id = get_primary_id(_strip_isoform(protein_id)) try: gene_name = um.uniprot_gene_name[protein_id] # We only get here if the protein_id was in the dict if gene_name: return gene_name # We do it this way to return None for empty strings else: return None except KeyError: if not web_fallback: return None tree = query_protein(protein_id) if tree is None: return None name = tree.find('up:entry/up:gene/up:name', namespaces=xml_ns) if name is not None: return name.text return None def get_gene_synonyms(protein_id: str) -> List[str]: """Return a list of synonyms for the gene corresponding to a protein. Note that synonyms here also include the official gene name as returned by get_gene_name. Parameters ---------- protein_id : The UniProt ID of the protein to query Returns ------- : The list of synonyms of the gene corresponding to the protein """ protein_id = get_primary_id(_strip_isoform(protein_id)) protein = query_protein(protein_id) if protein is None: return [] synonyms = [] gene_synoyms = protein.findall('up:entry/up:gene/up:name', namespaces=xml_ns) for gene_syn in gene_synoyms: synonyms.append(gene_syn.text) return synonyms def get_protein_synonyms(protein_id): """Return a list of synonyms for a protein. 
Note that this function returns protein synonyms as provided by UniProt. The get_gene_synonym returns synonyms given for the gene corresponding to the protein, and get_synonyms returns both. Parameters ---------- protein_id : str The UniProt ID of the protein to query Returns ------- synonyms : list[str] The list of synonyms of the protein """ protein_id = get_primary_id(_strip_isoform(protein_id)) tree = query_protein(protein_id) if tree is None: return None synonyms = [] for syn_type, syn_len in itertools.product(['recommended', 'alternative'], ['full', 'short']): synonym_type = 'up:entry/up:protein/up:%sName/up:%sName' % \ (syn_type, syn_len) synonyms_xml = tree.findall(synonym_type, namespaces=xml_ns) for synonym_xml in synonyms_xml: synonyms.append(synonym_xml.text) return synonyms def get_synonyms(protein_id): """Return synonyms for a protein and its associated gene. Parameters ---------- protein_id : str The UniProt ID of the protein to query Returns ------- synonyms : list[str] The list of synonyms of the protein and its associated gene. """ protein_id = get_primary_id(_strip_isoform(protein_id)) ret = [] gene_syms = get_gene_synonyms(protein_id) if gene_syms: ret.extend(gene_syms) prot_syms = get_protein_synonyms(protein_id) if prot_syms: ret.extend(prot_syms) return ret @lru_cache(maxsize=10000) def get_sequence(protein_id): base, iso = _split_isoform(get_primary_id(protein_id)) # Try to get the sequence from the downloaded sequence files if iso == '1': protein_id = base else: protein_id = _reattach_isoform(base, iso) seq = um.uniprot_sequences.get(protein_id) if seq is None: url = uniprot_url + '%s.fasta' % protein_id res = requests.get(url) res.raise_for_status() # res.text is Unicode lines = res.text.splitlines() seq = (''.join(lines[1:])).replace('\n', '') return seq def get_modifications(protein_id: str) -> List[Tuple[str, int]]: """Return a list of modifications for a protein. Parameters ---------- protein_id : The UniProt ID of the protein to query Returns ------- : The list of modifications of the protein, each represented as a tuple of residue description string and position string. """ protein_id = get_primary_id(_strip_isoform(protein_id)) tree = query_protein(protein_id) if tree is None: return None # We find all features of type 'modified residue' features = tree.findall("up:entry/up:feature[@type='modified residue']", namespaces=xml_ns) mods = [] for feature in features: # We find the position of the modified residue pos_tag = feature.find('up:location/up:position', namespaces=xml_ns) if pos_tag is None: continue pos = int(pos_tag.attrib['position']) # We find the residue res = feature.attrib['description'].split(';')[0] mods.append((res, pos)) return mods def verify_location(protein_id, residue, location): """Return True if the residue is at the given location in the UP
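# ----------------------------------------------------------------------
# Hedged usage sketch (not part of protmapper): exercising the lookups defined
# above. 'P00533' (human EGFR) is used purely as an illustrative accession, and
# the calls assume the UniProt resource files have been loaded into `um` and,
# for any web fallback, that network access is available.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    example_id = 'P00533'
    print(get_primary_id(example_id))          # already primary -> returned unchanged
    print(get_gene_name(example_id))           # primary gene symbol, e.g. EGFR
    print(len(get_sequence(example_id)))       # length of the canonical sequence
    print(get_modifications(example_id)[:3])   # first few (residue, position) pairs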
<filename>utils.py #!/usr/bin/env python3 # coding=utf-8 import io import os import re import sys import ssl import glob import time import shlex import signal import subprocess has_filter = False has_progress = False has_winreg = False has_certifi = False is_cygwin = sys.platform == 'cygwin' is_win32 = sys.platform == 'win32' is_conemu = False last_progress = 0 # try importing the optional dependencies try: import winreg has_winreg = True except ImportError: pass try: import certifi has_certifi = True except ImportError: pass if is_win32: try: from colorama import init init() has_filter = True except ImportError: pass import ctypes class ConsoleCursorInfo(ctypes.Structure): _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] if not is_win32 or has_filter: class Fore: RED = '\x1B[91m' GREEN = '\x1B[92m' BLUE = '\x1B[94m' YELLOW = '\x1B[93m' RESET = '\x1B[39m' else: class Fore: RED = '' GREEN = '' BLUE = '' YELLOW = '' RESET = '' # registers for the interrupt signal in order to gracefully exit when Ctrl-C is hit def handle_sigint(): def signal_handler(signal, frame): clear_progress() show_cursor() print('%s[!]%s Terminating early due to interruption.' % (Fore.RED, Fore.RESET)) sys.exit(-1) signal.signal(signal.SIGINT, signal_handler) # check if any CA bundles were loaded or fallback to certifi otherwise def ensure_ca_load(): if ssl.create_default_context().cert_store_stats()['x509_ca'] == 0: if has_certifi: def create_certifi_context(purpose = ssl.Purpose.SERVER_AUTH, *, cafile = None, capath = None, cadata = None): return ssl.create_default_context(purpose, cafile = certifi.where()) ssl._create_default_https_context = create_certifi_context else: print('%s[!]%s Python was unable to load any CA bundles. Additionally, the fallback %scertifi%s module is not available. Install it with %spip3 install certifi%s for TLS connection support.' % (Fore.RED, Fore.RESET, Fore.GREEN, Fore.RESET, Fore.GREEN, Fore.RESET)) sys.exit(-1) # parse image[:tag] | archive argument def parse_image_arg(argv, can_be_file = False): """ Parses the image[:tag] argument passed to the script. When can_be_file is True, the argument can also be a filename, _and_ if it is not, the generated image:tag should have a corresponding rootfs archive available. :param argv: Passed argument. :param can_be_file: Whether argument can be a file and image:tag should also resolve to a file. :return: Name of the image, tag, name of the file, label. """ exts = ['.tar', '.sfs', '.squashfs'] argvl = argv.lower() image = argv tag = 'latest' fname = '' label = '' if not can_be_file or all(ext not in argvl for ext in exts): # handle image:tag if ':' in image: idx = image.find(':') tag = image[idx + 1:] image = image[:idx] if can_be_file: fname = 'rootfs_%s_%s.tar*' % (image.replace('/', '_'), tag) names = glob.glob(fname) if len(names) > 0: fname = names[0] else: print('%s[!]%s No files found matching %s%s%s.' % (Fore.RED, Fore.RESET, Fore.BLUE, fname, Fore.RESET)) sys.exit(-1) else: fname = 'rootfs_%s_%s' % (image.replace('/', '_'), tag) label = '%s_%s' % (image.replace('/', '_'), tag) else: # handle file name fname = argv if not os.path.isfile(fname): print('%s[!]%s %s%s%s is not an existing file.' 
% (Fore.RED, Fore.RESET, Fore.BLUE, fname, Fore.RESET)) sys.exit(-1) idx = -1 for ext in exts: idx = argvl.find(ext) if idx != -1: break label = os.path.basename(argvl[:idx]) if label.startswith('rootfs_'): label = label[len('rootfs_'):] if label.find('_') == -1: label += '_' + tag return image, tag, fname, label # sanity check WSL installation def probe_wsl(silent = False): """ Checks whether the WSL is installed and not running. :type silent: Whether to print an error message or just return an empty string on failure. :return: Paths to the WSL directory and lxrun/bash executables. """ global is_cygwin if not is_cygwin: #packagesSubFolder = os.path.join(os.getenv('LocalAppData'), 'Packages') #basedir = os.path.join(packagesSubFolder, 'TheDebianProject.DebianGNULinux_76v4gfsz19hv4') #basedir = os.path.join(packagesSubFolder, 'CanonicalGroupLimited.UbuntuonWindows_79rhkp1fndgsc') #localStateDir = os.path.join(basedir, 'LocalState') basedir = "C://Users//cmdsz//Debian" localStateDir = basedir else: print('JPST: not yet fixed when running this process via cygwin, sorry!') sys.exit(-1) basedir = subprocess.check_output('/usr/bin/cygpath -F 0x001c', shell = True, universal_newlines = True) basedir = os.path.join(basedir.strip(), 'lxss') if not os.path.isdir(basedir): if silent: return None, None, None print('%s[!]%s The Linux subsystem is not installed. Please go through the standard installation procedure first.' % (Fore.RED, Fore.RESET)) sys.exit(-1) # new temp is in basedir/LocalState/temp if os.path.exists(os.path.join(localStateDir, 'temp')) and os.listdir(os.path.join(localStateDir, 'temp')): if silent: return None, None, None print('%s[!]%s The Linux subsystem is currently running. Please kill all instances before continuing.' % (Fore.RED, Fore.RESET)) sys.exit(-1) if not is_cygwin: syspath = os.getenv('SystemRoot') else: syspath = subprocess.check_output('/usr/bin/cygpath -W', shell = True, universal_newlines = True).strip() lxpath = '' #methinks location in System32 is from legacy installer lxpaths = [os.path.join(syspath, 'WinSxS\\amd64_microsoft-windows-lxss-installer_31bf3856ad364e35_10.0.17134.1_none_e9926368b80f9a59'), os.path.join(syspath, 'System32')] for path in lxpaths: if os.path.exists(os.path.join(path, 'LxRun.exe')): lxpath = path break if not lxpath and not silent: print('%s[!]%s Unable to find %slxrun.exe%s in the expected locations.' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET)) sys.exit(-1) bashpath = '' #new iteration of WSL splitted all linux related resources in seperate folders inside C:\Windows\WinSxS\* bashpaths = [os.path.join(syspath, 'WinSxS', 'amd64_microsoft-windows-lxss-bash_31bf3856ad364e35_10.0.17134.1_none_251beae725bc7de5'), os.path.join(syspath, 'System32')] for path in bashpaths: if os.path.exists(os.path.join(path, 'bash.exe')): bashpath = path break if not bashpath and not silent: print('%s[!]%s Unable to find %sbash.exe%s in the expected locations.' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET)) sys.exit(-1) return basedir, lxpath, bashpath # translate the path between Windows and Cygwin def path_trans(path): """ Translate the path, if required. Under the native Windows installation of Python, this function does nothing. Under the Cygwin version, the provided path is translated to a Windows-native path. :param path: Path to be translated. :return: Translated path. 
""" global is_cygwin if not is_cygwin or not path.startswith('/cygdrive/'): return path # too slow: # subprocess.check_output('/usr/bin/cygpath -w ' + shlex.quote(path), shell = True, universal_newlines = True).strip() path = path[10] + ':\\' + path[12:].replace('/', '\\') return path # get label of rootfs def get_label(path): """ Gets the label for the specified rootfs. If the .switch_label file is not present, but the OS was identified, the file will be created for future use. :param path: Path to the rootfs. :return: Label of the rootfs. """ # see if .switch_label exists if os.path.isfile(os.path.join(path, '.switch_label')): try: with open(os.path.join(path, '.switch_label')) as f: label = f.readline().strip() if len(label) > 0: return label except OSError: pass # check if the directory name has any stuff appended to it dirname = os.path.basename(path) if dirname.startswith('rootfs_'): label = dirname[len('rootfs_'):] if len(label) > 0: # save label for next occasion try: with open(os.path.join(path, '.switch_label'), 'w') as f: f.write(label + '\n') except OSError: pass return label # see if any *release files exist in /etc/ rlsfiles = glob.glob(os.path.join(path, 'etc', '*release')) + glob.glob(os.path.join(path, 'usr', 'lib', 'os-release*')) if len(rlsfiles) > 0: rlslines = [] for file in rlsfiles: try: with open(file) as f: rlslines += f.readlines() except OSError: pass name = ['', '', ''] # ID || DISTRIB_ID || NAME vers = ['', '', ''] # DISTRIB_CODENAME || DISTRIB_RELEASE || VERSION_ID for line in rlslines: kv = line.split('=', 1) if len(kv) < 2: continue key = kv[0].strip().strip('"\'').lower() val = kv[1].strip().strip('"\'').lower() if len(val) == 0: continue if key == 'id': name[0] = val elif key == 'distrib_id': name[1] = val elif key == 'name': name[2] = val if key == 'distrib_codename': vers[0] = val elif key == 'distrib_release': vers[1] = val elif key == 'version_id': vers[2] = val name = list(filter(None, name)) vers = list(filter(None, vers)) if len(name) > 0: label = name[0] + ('_' + vers[0] if len(vers) > 0 else '') # save label for next occasion try: with open(os.path.join(path, '.switch_label'), 'w') as f: f.write(label + '\n') except OSError: pass return label # oh well return '' # toggle cursor visibility in the terminal def show_cursor(): """ Turns the cursor back on in the terminal. """ if not sys.platform == 'win32': sys.stdout.write('\033[?25h') else: ci = ConsoleCursorInfo() handle = ctypes.windll.kernel32.GetStdHandle(-11) ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) ci.visible = True ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) def hide_cursor(): """ Turns the cursor off in the terminal. """ global is_conemu if not sys.platform == 'win32': sys.stdout.write('\033[?25l') is_conemu = False else: ci = ConsoleCursorInfo() handle = ctypes.windll.kernel32.GetStdHandle(-11) ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) ci.visible = False ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) is_conemu = os.environ.get('ConEmuANSI') == 'ON' # some characters are forbidden in NTFS, but are not in ext4. the most popular of these characters # seems to be the colon character. LXSS solves this issue by escaping the character on NTFS. 
# while this seems like a dumb implementation, it will be called a lot of times inside the # decompression loop, so it has to be fast: http://stackoverflow.com/a/27086669/156626 def escape_ntfs_invalid(name): """ Escapes characters which are forbidden in NTFS, but are not in ext4. :param name: Path potentially containing forbidden NTFS characters. :return: Path with forbidden NTFS characters escaped. """ return name.replace('*', '#002A').replace('|', '#007C').replace(':', '#003A').replace('>', '#003E').replace('<', '#003C').replace('?', '#003F').replace('"', '#0022') # stream copier with progress bar def chunked_copy(name, source, dest): """ Copies one stream into another, with progress bar. :param name: Name of the file to display. :param source: Source stream. :param dest: Destination stream. :return: Number of bytes copied. """ global is_conemu size = int(source.info()['Content-Length'].strip()) recv = 0 if len(name) > 23: name = name[0:20] + '...' hide_cursor() while True: chunk = source.read(8192) recv += len(chunk) if not chunk: break dest.write(chunk) draw_progress(recv, size, name) show_cursor() return recv # FileIO wrapper with progress bar class ProgressFileObject(io.FileIO): def __init__(self, path, *args, **kwargs): self._total_size = os.path.getsize(path) self.current_extraction = '' io.FileIO.__init__(self, path, *args, **kwargs) hide_cursor() def read(self, length): """ Read at most size bytes, returned as bytes. Only makes one system call, so less data may be returned than requested. In non-blocking mode, returns None if no data is available. Return an empty bytes object at EOF. """ draw_progress(self.tell(), self._total_size, self.current_extraction) return io.FileIO.read(self, length) def __del__(self): show_cursor() # standalone function to draw an interactive progressbar def draw_progress(recv, size, name): """ Draws an interactive progressbar based on the specified information. :param recv: Number of bytes received. :param size: Total size of the file. :param name: Name of the file to display. """ global is_conemu, has_progress, last_progress if recv > size: recv = size if recv == size: clear_progress() return if time.time() - last_progress < 0.05: return has_progress = True last_progress = time.time() if len(name) > 23: name = name[0:20] + '...' else: name = name.ljust(23, ' ') pct = round(recv / size * 100, 2) bar = int(50 * recv / size) sys.stdout.write('\r %s [%s>%s] %0.2f%%' % (name, '=' * bar, ' ' * (50 - bar), pct)) if is_conemu: sys.stdout.write('\033]9;4;1;%0.0f\033\\\033[39m' % pct) sys.stdout.flush() def clear_progress(): """ Clears the progress bar. """ global is_conemu, has_progress if not has_progress: return has_progress = False sys.stdout.write('\r%s\r' % (' ' * (66 + 23))) if is_conemu: sys.stdout.write('\033]9;4;0\033\\\033[39m') sys.stdout.flush() # functions to interact with the registry def get_lxss_user(): """ Gets the active user inside WSL. :return: Tuple
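# ----------------------------------------------------------------------
# Hedged sketch (not part of the original utils.py): the NTFS escaping helper
# defined above, applied to a path containing characters that are legal on
# ext4 but forbidden on NTFS; the output shows the #00XX escape scheme.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    print(escape_ntfs_invalid('usr/share/doc/foo:bar?baz'))
    # -> usr/share/doc/foo#003Abar#003Fbaz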
{'table': table3_name, 'id': 3, 'name': '<NAME>'}] INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS) pipeline_builder = sdc_builder.get_pipeline_builder() pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multitable Insert', INPUT_DATA, "${record:value('/table')}", 'INSERT') # JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method # when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL # error. pipeline.configure_for_environment(database) pipeline[2].set_attributes(table_name="${record:value('/table')}") # For Oracle, the default value of JDBC Producer's "Schema Name" property in the database environment is the # database name, but it should be the username instead. if isinstance(database, OracleDatabase): pipeline[2].set_attributes(schema_name=database.username.upper()) sdc_executor.add_pipeline(pipeline) try: sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS)) sdc_executor.stop_pipeline(pipeline) result1 = database.engine.execute(table1.select()) result2 = database.engine.execute(table2.select()) result3 = database.engine.execute(table3.select()) data1 = result1.fetchall() data2 = result2.fetchall() data3 = result3.fetchall() assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])] assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])] assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])] result1.close() result2.close() result3.close() finally: logger.info('Dropping tables %s, %s, %s in %s database...', table1_name, table2_name, table3_name, database.type) table1.drop(database.engine) table2.drop(database.engine) table3.drop(database.engine) # Test SDC-10719 @database @sdc_min_version('3.8.0') def test_jdbc_producer_multischema(sdc_builder, sdc_executor, database): """Test for JDBC Producer in a multischema scenario with a single destination table for each schema. We create 3 schemas with one table for each, with the same name. Then we use an EL expression to insert records according to the /schema record field. Pipeline: dev_raw_data_source >> record_deduplicator >> jdbc_producer record_deduplicator >> trash """ schema1_name = _get_random_name(database, prefix='stf_schema_') schema2_name = _get_random_name(database, prefix='stf_schema_') schema3_name = _get_random_name(database, prefix='stf_schema_') table_name = _get_random_name(database, prefix='stf_table_') _create_schema(schema1_name, database) _create_schema(schema2_name, database) _create_schema(schema3_name, database) table1 = _create_table(table_name, database, schema_name=schema1_name) table2 = _create_table(table_name, database, schema_name=schema2_name) table3 = _create_table(table_name, database, schema_name=schema3_name) ROWS = [{'schema': schema1_name, 'id': 1, 'name': '<NAME>'}, {'schema': schema2_name, 'id': 2, 'name': '<NAME>'}, {'schema': schema3_name, 'id': 3, 'name': '<NAME>'}] INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS) pipeline_builder = sdc_builder.get_pipeline_builder() pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema Insert', INPUT_DATA, table_name, 'INSERT') # JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances # (e.g. Sql Server database). We overwrite it afterwards for the test. 
pipeline.configure_for_environment(database) pipeline[2].set_attributes(schema_name="${record:value('/schema')}") sdc_executor.add_pipeline(pipeline) try: sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS)) sdc_executor.stop_pipeline(pipeline) result1 = database.engine.execute(table1.select()) result2 = database.engine.execute(table2.select()) result3 = database.engine.execute(table3.select()) data1 = result1.fetchall() data2 = result2.fetchall() data3 = result3.fetchall() assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])] assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])] assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])] result1.close() result2.close() result3.close() finally: logger.info('Dropping table %s in schemas...', table_name) table1.drop(database.engine) table2.drop(database.engine) table3.drop(database.engine) logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name) _drop_schema(schema1_name, database) _drop_schema(schema2_name, database) _drop_schema(schema3_name, database) # Test SDC-10719 @database @sdc_min_version('3.8.0') def test_jdbc_producer_multischema_multitable(sdc_builder, sdc_executor, database): """Test a JDBC Producer in a multischema scenario with different destination tables for each schema. We create 3 schemas with one table for each, with different names. Then we use an EL expressions to insert records according to the /schema and /table record fields. There were a limitation in previous versions that affected to MySQL and MemSQL. These RDBMs do not differentiate between schema and database. SDC used the database configured in the JDBC connection string, and looked for database metadata filtering by database+schema. If the schema were other than the database of the connection string, metadata could not be retrieved. This was a problem in a multischema scenario, where several schemas are employed. Pipeline: dev_raw_data_source >> record_deduplicator >> jdbc_producer record_deduplicator >> trash """ schema1_name = _get_random_name(database, prefix='stf_schema_') schema2_name = _get_random_name(database, prefix='stf_schema_') schema3_name = _get_random_name(database, prefix='stf_schema_') table1_name = _get_random_name(database, prefix='stf_table_') table2_name = _get_random_name(database, prefix='stf_table_') table3_name = _get_random_name(database, prefix='stf_table_') _create_schema(schema1_name, database) _create_schema(schema2_name, database) _create_schema(schema3_name, database) table1 = _create_table(table1_name, database, schema_name=schema1_name) table2 = _create_table(table2_name, database, schema_name=schema2_name) table3 = _create_table(table3_name, database, schema_name=schema3_name) ROWS = [{'schema': schema1_name, 'table': table1_name, 'id': 1, 'name': '<NAME>'}, {'schema': schema2_name, 'table': table2_name, 'id': 2, 'name': '<NAME>'}, {'schema': schema3_name, 'table': table3_name, 'id': 3, 'name': '<NAME>'}] INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS) pipeline_builder = sdc_builder.get_pipeline_builder() pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema and Multitable Insert', INPUT_DATA, "${record:value('/table')}", 'INSERT') # JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances # (e.g. Sql Server database). We overwrite it afterwards for the test. 
pipeline.configure_for_environment(database) pipeline[2].set_attributes(schema_name="${record:value('/schema')}") # JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method # when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL # error. pipeline[2].set_attributes(table_name="${record:value('/table')}") sdc_executor.add_pipeline(pipeline) try: sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS)) sdc_executor.stop_pipeline(pipeline) result1 = database.engine.execute(table1.select()) result2 = database.engine.execute(table2.select()) result3 = database.engine.execute(table3.select()) data1 = result1.fetchall() data2 = result2.fetchall() data3 = result3.fetchall() assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])] assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])] assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])] result1.close() result2.close() result3.close() finally: logger.info('Dropping tables %s, %s, %s...', table1_name, table2_name, table3_name) table1.drop(database.engine) table2.drop(database.engine) table3.drop(database.engine) logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name) _drop_schema(schema1_name, database) _drop_schema(schema2_name, database) _drop_schema(schema3_name, database) # SDC-11063: Do not reoder update statements in JDBC destination @sdc_min_version('3.0.0.0') @pytest.mark.parametrize('multi_row', [True, False]) @database def test_jdbc_producer_ordering(sdc_builder, sdc_executor, multi_row, database): """Ensure that variously intertwined operations won't be executed out of order in harmful way.""" table_name = get_random_string(string.ascii_lowercase, 20) metadata = sqlalchemy.MetaData() table = sqlalchemy.Table( table_name, metadata, sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True, autoincrement=False), sqlalchemy.Column('a', sqlalchemy.Integer, quote=True), sqlalchemy.Column('b', sqlalchemy.Integer, quote=True) ) RAW_DATA = [ # Update id=5 {"op": 3, "id": 5, "a": 2, "b": 2}, # Insert id=4 {"op": 1, "id": 4, "a": 1, "b": 1}, # Update id=4 {"op": 3, "id": 4, "a": 2, "b": 2}, # Delete id=5 {"op": 2, "id": 5}, # Insert id=1 {"op": 1, "id": 1, "a": 1, "b": 1}, # Update id=1 {"op": 3, "id": 1, "a": 2}, # Insert id=2 {"op": 1, "id": 2, "a": 1, "b": 1}, # Delete id=2 {"op": 2, "id": 2}, # Update id=1 {"op": 3, "id": 1, "a": 2, "b": 2}, # Insert id=3 {"op": 1, "id": 3, "a": 1, "b": 1}, # Update id=1 {"op": 3, "id": 1, "a": 3}, # Update id=3 {"op": 3, "id": 3, "a": 5}, # Delete id=3 {"op": 2, "id": 3} ] builder = sdc_builder.get_pipeline_builder() source = builder.add_stage('Dev Raw Data Source') source.stop_after_first_batch = True source.data_format = 'JSON' source.raw_data = '\n'.join(json.dumps(rec) for rec in RAW_DATA) expression = builder.add_stage('Expression Evaluator') expression.header_attribute_expressions = [ {'attributeToSet': 'sdc.operation.type', 'headerAttributeExpression': '${record:value("/op")}'} ] remover = builder.add_stage('Field Remover') remover.set_attributes(fields=['/op'], action='REMOVE') producer = builder.add_stage('JDBC Producer') producer.field_to_column_mapping = [] producer.default_operation = 'UPDATE' producer.table_name = table_name producer.use_multi_row_operation = multi_row if database.type == 'Oracle': producer.enclose_object_names = True source >> expression >> remover >> producer pipeline = 
builder.build().configure_for_environment(database) sdc_executor.add_pipeline(pipeline) try: logger.info('Creating table %s in %s database ...', table_name, database.type) table.create(database.engine) # The table will start with single row (id=5) logger.info('Inserting rows into %s in %s database', table_name, database.type) connection = database.engine.connect() connection.execute(table.insert(), {'id': 5, 'a': 1, 'b': 1}) # Finally run the pipeline and verify it's outcome sdc_executor.start_pipeline(pipeline).wait_for_finished() result = database.engine.execute(table.select()) db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id result.close() assert len(db) == 2 # id=1 assert 1 == db[0][0] assert 3 == db[0][1] assert 2 == db[0][2] # id=5 assert 4 == db[1][0] assert 2 == db[1][1] assert 2 == db[1][2] finally: logger.info('Dropping table %s in %s database ...', table_name, database.type) table.drop(database.engine) @sdc_min_version('3.0.0.0') @database def test_jdbc_multitable_events(sdc_builder, sdc_executor, database): """ Validate that we properly generate events """ if database.type == 'Oracle': pytest.skip("This test depends on auto-created ID that doesn't work properly on Oracle") table_prefix = get_random_string(string.ascii_lowercase, 20) table_a = '{}_a'.format(table_prefix) table_b = '{}_b'.format(table_prefix) table_events = '{}_events'.format(table_prefix) builder = sdc_builder.get_pipeline_builder() source = builder.add_stage('JDBC Multitable Consumer') source.transaction_isolation = 'TRANSACTION_READ_COMMITTED' source.table_configs = [{ 'tablePattern': f'{table_prefix}%', "enableNonIncremental": True, 'tableExclusionPattern': table_events }] trash = builder.add_stage('Trash') expression = builder.add_stage('Expression Evaluator') expression.field_expressions = [{ 'fieldToSet': '/tbl', 'expression': '${record:value("/table")}${record:value("/tables[0]")}' }, { 'fieldToSet': '/tbls', 'expression': '${record:value("/tables[0]")},${record:value("/tables[1]")}' }, { 'fieldToSet': '/event', 'expression': '${record:eventType()}' } ] producer = builder.add_stage('JDBC Producer') producer.table_name = table_events producer.default_operation = 'INSERT' producer.field_to_column_mapping = [ dict(field='/event', columnName='event'), dict(field='/tbl', columnName='tbl'), dict(field='/tbls', columnName='tbls') ] source >> trash source >= expression expression >> producer pipeline = builder.build().configure_for_environment(database) sdc_executor.add_pipeline(pipeline) # We need three tables for this test metadata = sqlalchemy.MetaData() a = sqlalchemy.Table( table_a, metadata, sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True) ) b = sqlalchemy.Table( table_b, metadata, sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False) ) events = sqlalchemy.Table( table_events, metadata, sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True), sqlalchemy.Column('event', sqlalchemy.String(50)), sqlalchemy.Column('tbl', sqlalchemy.String(150)), sqlalchemy.Column('tbls', sqlalchemy.String(150)) ) try: logger.info('Creating tables %s, %s and %s in %s database ...', table_a, table_b, table_events, database.type) a.create(database.engine) b.create(database.engine) events.create(database.engine) logger.info('Inserting
import os import time import json import logging import binascii import networkx as nx from copy import deepcopy from networkx import NetworkXNoPath from itertools import islice from threading import Thread, Event from flask import Flask, request, Response, abort from riemann import tx from bitcoind_mock.rpc_errors import * from bitcoind_mock import conf, utils from bitcoind_mock import transaction from bitcoind_mock.zmq_publisher import ZMQPublisher app = Flask(__name__) GENESIS_PARENT = "00" * 32 def set_event(event, wait_time): """ Sets the mining event once every ``wait_time`` seconds so a new block is generated at fixed intervals. Args: event(:obj:`Event`): the event to be set. wait_time(:obj:`int`): time between blocks. """ while True: time.sleep(wait_time) event.set() class BitcoindMock: """ Tiny mock of bitcoind. It creates a blockchain mock and a JSON-RPC interface. Lets you perform some of the bitcoind RPC commands (listed in process_request). Also lets you mine blocks by time or on demand, and create forks on demand. Attributes: blockchain(:obj:`DiGraph`): a directed graph representing the blockchain. blocks(:obj:`dict`): a dictionary keeping track of all blocks. Contains: ``{tx, height, previousblockhash, chainwork}`` mempool(:obj:`dict`): a dictionary keeping track of the transactions pending to be mined. transactions(:obj:`dict`): a dictionary with all the mined transactions and a reference to the block where they were mined. mine_new_block(:obj:`Event`): an event flag to trigger a new block (if the mock is set to mine based on events). best_tip(:obj:`str`): a reference to the chain best tip. genesis(:obj:`str`): a reference to the chain genesis block. last_mined_block(:obj:`str`): a reference to the last mined block. The mock will mine on top of it. """ def __init__(self): self.blockchain = nx.DiGraph() self.blocks = dict() self.mempool = dict() self.transactions = dict() self.mine_new_block = Event() # Create the genesis block block_hash, coinbase_tx, coinbase_tx_hash = self.get_new_block_data() self.transactions[coinbase_tx_hash] = {"tx": coinbase_tx, "block_hash": block_hash} self.blocks[block_hash] = { "tx": [coinbase_tx_hash], "height": 0, "previousblockhash": GENESIS_PARENT, "chainwork": 0, } self.blockchain.add_node(block_hash, short_id=block_hash[:8]) self.best_tip = block_hash self.last_mined_block = block_hash self.genesis = block_hash # Set the event so it can start mining right off the bat self.mine_new_block.set() @staticmethod def get_rpc_param(request_data): """ Gets the first parameter from an RPC call. Args: request_data(:obj:`dict`): the request data holding the parameters of the rpc call. Returns: :obj:`str` or :obj:`None`: The first parameter of the call, or ``None`` if there are no parameters. """ params = request_data.get("params") if isinstance(params, list) and len(params) > 0: return params[0] else: return None @staticmethod def get_new_block_data(): """ Creates the data to be used for a mined block. Returns: :obj:`tuple`: A three-item tuple (block_hash, coinbase_tx, coinbase_tx_hash) """ block_hash = utils.get_random_value_hex(32) coinbase_tx = transaction.create_dummy_transaction() return block_hash, coinbase_tx.hex(), coinbase_tx.tx_id.hex() def generate(self): """ Endpoint used to trigger the mining of a new block. It can be accessed at ``/`` using ``POST``. Returns: :obj:`Response`: An HTTP 200-OK response signaling the acceptance of the request.
""" self.mine_new_block.set() return Response(status=200, mimetype="application/json") def create_fork(self): """ Endpoint used to trigger a chain fork. It can be accessed at ``/fork`` using ``POST``. Requires a JSON encoded data with the key ``parent`` and the hash of an already mined block. The blockchain will be forked from the ``parent``. Returns: :obj:`Response`: An HTTP 200-OK response signaling the acceptance of the request if the parent was a valid block. An HTTP 200-OK with an error if the parent was invalid. """ request_data = request.get_json() response = {"result": 0, "error": None} parent = request_data.get("parent") # FIXME: We only accept forks one by one for now if parent not in self.blocks: response["error"] = {"code": -1, "message": "Wrong parent block to fork from"} else: print("Forking chain from {}".format(parent)) self.last_mined_block = parent self.generate() return Response(json.dumps(response), status=200, mimetype="application/json") def process_request(self): """ Simulates the bitcoin-rpc server run by bitcoind. The available commands are limited to the ones we'll need to test out functionality. The model we will be using is pretty simplified to reduce the complexity of mocking bitcoind: decoderawtransaction: querying for the decoding of a raw transaction will return a dictionary with a single field: "txid". sendrawtransaction: sending a rawtransaction will notify our mining simulator to include such transaction in a subsequent block (add it to mempool). getrawtransaction: requesting a rawtransaction from a txid will return a dictionary containing a single field: "confirmations", since rawtransactions are only queried to check whether a transaction has made it to a block or not. getblockcount: the block count represents the height of the most-work fully-validated chain. The genesis block has height 0. getblock: querying for a block will return a dictionary with three fields: "tx" representing a list of transactions, "height" representing the block height and "hash" representing the block hash. getblockhash: returns the hash of a block given its height. getbestblockhash: returns the hash of the block in the tip of the chain. help: help is only used as a sample command to test if bitcoind is running when bootstrapping. It will return a 200/OK with no data. """ request_data = request.get_json() method = request_data.get("method") response = {"id": 0, "result": 0, "error": None} no_param_err = {"code": RPC_MISC_ERROR, "message": "JSON value is not a {} as expected"} if method == "decoderawtransaction": rawtx = self.get_rpc_param(request_data) if isinstance(rawtx, str) and len(rawtx) % 2 is 0: try: t = tx.Tx.from_hex(rawtx) response["result"] = {"txid": t.tx_id.hex()} except (ValueError, IndexError): response["error"] = {"code": RPC_DESERIALIZATION_ERROR, "message": "TX decode failed"} else: response["error"] = no_param_err response["error"]["message"] = response["error"]["message"].format("string") elif method == "sendrawtransaction": # TODO: A way of rejecting transactions should be added to test edge cases. 
rawtx = self.get_rpc_param(request_data) if isinstance(rawtx, str) and len(rawtx) % 2 is 0: try: t = tx.Tx.from_hex(rawtx) txid = t.tx_id.hex() if txid not in self.transactions: self.mempool[txid] = rawtx response["result"] = {"txid": txid} else: response["error"] = { "code": RPC_VERIFY_ALREADY_IN_CHAIN, "message": "Transaction already in block chain", } except (ValueError, IndexError): response["error"] = {"code": RPC_DESERIALIZATION_ERROR, "message": "TX decode failed"} else: response["error"] = no_param_err response["error"]["message"] = response["error"]["message"].format("string") elif method == "getrawtransaction": txid = self.get_rpc_param(request_data) if isinstance(txid, str): if txid in self.transactions: block_hash = self.transactions[txid]["block_hash"] if self.in_best_chain(block_hash): block = self.blocks.get(block_hash) rawtx = self.transactions[txid].get("tx") response["result"] = { "hex": rawtx, "confirmations": 1 + self.blocks.get(self.best_tip).get("height") - block.get("height"), } else: response["error"] = { "code": RPC_INVALID_ADDRESS_OR_KEY, "message": "No such mempool or blockchain transaction. Use gettransaction for wallet " "transactions.", } elif txid in self.mempool: response["result"] = {"confirmations": None} else: response["error"] = { "code": RPC_INVALID_ADDRESS_OR_KEY, "message": "No such mempool or blockchain transaction. Use gettransaction for " "wallet transactions.", } else: response["error"] = no_param_err response["error"]["message"] = response["error"]["message"].format("string") elif method == "getblockcount": response["result"] = self.blocks[self.best_tip].get("height") elif method == "getblock": block_hash = self.get_rpc_param(request_data) if isinstance(block_hash, str): block = deepcopy(self.blocks.get(block_hash)) if block is not None: if self.in_best_chain(block_hash): block["confirmations"] = 1 + self.blocks.get(self.best_tip).get("height") - block.get("height") else: block["confirmations"] = -1 # chainwork is returned as a 32-byte hex by bitcoind block["chainwork"] = "{:064x}".format(block["chainwork"]) block["hash"] = block_hash response["result"] = block else: response["error"] = {"code": RPC_INVALID_ADDRESS_OR_KEY, "message": "Block not found"} else: response["error"] = no_param_err response["error"]["message"] = response["error"]["message"].format("string") elif method == "getblockhash": height = self.get_rpc_param(request_data) if isinstance(height, int): if 0 <= height <= self.blocks.get(self.best_tip).get("height"): response["result"] = nx.shortest_path(self.blockchain, self.genesis, self.best_tip)[height] else: response["error"] = {"code": RPC_INVALID_PARAMETER, "message": "Block height out of range"} else: response["error"] = no_param_err response["error"]["message"] = response["error"]["message"].format("integer") elif method == "getbestblockhash": response["result"] = self.best_tip elif method == "help": pass else: return abort(404, "Method not found") return Response(json.dumps(response), status=200, mimetype="application/json") def in_best_chain(self, block_hash): """ Returns whether a given block hash if part of the best chain or not. A block is party of the best chain if there a path from it to the best tip (directed graph). Args: block_hash(:obj:`str`): the block hash to be checked. Returns: :obj:`bool`: Whether the block is part of the best chain or not. 
""" try: nx.shortest_path(self.blockchain, block_hash, self.best_tip) return True except NetworkXNoPath: return False def simulate_mining(self, verbose=True): """ Simulates bicoin mining. The simulator ca be run in two modes: by events, or by time. If ``mode=='event'``, the simulator will be waiting for event on `/generate`. Otherwise, a block will be mined every ``TIME_BETWEEN_BLOCKS`` seconds. Transactions received via ``sendrawtransactions`` wil be included in a new generated block (up to ``TX_PER_BLOCK``). Also, the simulator will notify about new blocks via ZMQ. Args: verbose(:obj:`bool`): whether to print via stdout when
<gh_stars>1-10 # # (C) Copyright 2003-2007 Hewlett-Packard Development Company, L.P. # (C) Copyright 2006-2007 Novell, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # # Author: <NAME> <<EMAIL>> # Author: <NAME> <<EMAIL>> # Author: <NAME> <<EMAIL>> # """CIM operations over HTTP. The `WBEMConnection` class in this module opens a connection to a remote WBEM server. Across this connection you can run various CIM operations. Each method of this class corresponds fairly directly to a single CIM operation. """ # This module is meant to be safe for 'import *'. import re from datetime import datetime, timedelta from pywbemReq import cim_obj, cim_xml, cim_http, cim_types from pywbemReq.cim_obj import CIMInstance, CIMInstanceName, CIMClass, \ CIMClassName, NocaseDict from pywbemReq.cim_types import is_text, is_text_or_bool from pywbemReq.tupleparse import ParseError, parse_reply_xml __all__ = ['DEFAULT_NAMESPACE', 'check_utf8_xml_chars', 'CIMError', 'WBEMConnection', 'is_subclass', 'PegasusUDSConnection', 'SFCBUDSConnection', 'OpenWBEMUDSConnection'] DEFAULT_NAMESPACE = 'root/cimv2' if len(u'\U00010122') == 2: # This is a "narrow" Unicode build of Python (the normal case). _ILLEGAL_XML_CHARS_RE = re.compile( u'([\u0000-\u0008\u000B-\u000C\u000E-\u001F\uFFFE\uFFFF])') else: # This is a "wide" Unicode build of Python. _ILLEGAL_XML_CHARS_RE = re.compile( u'([\u0000-\u0008\u000B-\u000C\u000E-\u001F\uFFFE\uFFFF])') _ILL_FORMED_UTF8_RE = re.compile( '(\xED[\xA0-\xBF][\x80-\xBF])') # U+D800...U+DFFF def _check_classname(val): """ Validate a classname. At this point, only the type is validated to be a string. """ if not is_text(val): raise ValueError("string expected for classname, not %s" % repr(val)) def check_utf8_xml_chars(utf8_xml, meaning): """ Examine a UTF-8 encoded XML string and raise a `pywbem.ParseError` exception if the response contains Bytes that are invalid UTF-8 sequences (incorrectly encoded or ill-formed) or that are invalid XML characters. This function works in both "wide" and "narrow" Unicode builds of Python and supports the full range of Unicode characters from U+0000 to U+10FFFF. This function is just a workaround for the bad error handling of Python's `xml.dom.minidom` package. It replaces the not very informative `ExpatError` "not well-formed (invalid token): line: x, column: y" with a `pywbem.ParseError` providing more useful information. :Parameters: utf8_xml : string The UTF-8 encoded XML string to be examined. meaning : string Short text with meaning of the XML string, for messages in exceptions. :Exceptions: `TypeError`, if invoked with incorrect Python object type for `utf8_xml`. `pywbem.ParseError`, if `utf8_xml` contains Bytes that are invalid UTF-8 sequences (incorrectly encoded or ill-formed) or invalid XML characters. 
Notes on Unicode support in Python: (1) For internally representing Unicode characters in the unicode type, a "wide" Unicode build of Python uses UTF-32, while a "narrow" Unicode build uses UTF-16. The difference is visible to Python programs for Unicode characters assigned to code points above U+FFFF: The "narrow" build uses 2 characters (a surrogate pair) for them, while the "wide" build uses just 1 character. This affects all position- and length-oriented functions, such as `len()` or string slicing. (2) In a "wide" Unicode build of Python, the Unicode characters assigned to code points U+10000 to U+10FFFF are represented directly (using code points U+10000 to U+10FFFF) and the surrogate code points U+D800...U+DFFF are never used; in a "narrow" Unicode build of Python, the Unicode characters assigned to code points U+10000 to U+10FFFF are represented using pairs of the surrogate code points U+D800...U+DFFF. Notes on the Unicode code points U+D800...U+DFFF ("surrogate code points"): (1) These code points have no corresponding Unicode characters assigned, because they are reserved for surrogates in the UTF-16 encoding. (2) The UTF-8 encoding can technically represent the surrogate code points. ISO/IEC 10646 defines that a UTF-8 sequence containing the surrogate code points is ill-formed, but it is technically possible that such a sequence is in a UTF-8 encoded XML string. (3) The Python escapes ``\\u`` and ``\\U`` used in literal strings can represent the surrogate code points (as well as all other code points, regardless of whether they are assigned to Unicode characters). (4) The Python `str.encode()` and `str.decode()` functions successfully translate the surrogate code points back and forth for encoding UTF-8. For example, ``'\\xed\\xb0\\x80'.decode("utf-8") = u'\\udc00'``. (5) Because Python supports the encoding and decoding of UTF-8 sequences also for the surrogate code points, the "narrow" Unicode build of Python can be (mis-)used to transport each surrogate unit separately encoded in (ill-formed) UTF-8. For example, code point U+10122 can be (illegally) created from a sequence of code points U+D800,U+DD22 represented in UTF-8: ``'\\xED\\xA0\\x80\\xED\\xB4\\xA2'.decode("utf-8") = u'\\U00010122'`` while the correct UTF-8 sequence for this code point is: ``u'\\U00010122'.encode("utf-8") = '\\xf0\\x90\\x84\\xa2'`` Notes on XML characters: (1) The legal XML characters are defined in W3C XML 1.0 (Fith Edition): :: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] These are the code points of Unicode characters using a non-surrogate representation. """ context_before = 16 # number of chars to print before any bad chars context_after = 16 # number of chars to print after any bad chars if not isinstance(utf8_xml, str): raise TypeError("utf8_xml argument does not have str type, but %s" % \ type(utf8_xml)) # Check for ill-formed UTF-8 sequences. This needs to be done # before the str type gets decoded to unicode, because afterwards # surrogates produced from ill-formed UTF-8 cannot be distinguished from # legally produced surrogates (for code points above U+FFFF). 
ic_list = list() for m in _ILL_FORMED_UTF8_RE.finditer(utf8_xml): ic_pos = m.start(1) ic_seq = m.group(1) ic_list.append((ic_pos, ic_seq)) if len(ic_list) > 0: exc_txt = "Ill-formed (surrogate) UTF-8 Byte sequences found in %s:" % \ meaning for (ic_pos, ic_seq) in ic_list: exc_txt += "\n At offset %d:" % ic_pos for c in ic_seq: exc_txt += " 0x%02X" % ord(c) cpos1 = max(ic_pos - context_before, 0) cpos2 = min(ic_pos + context_after, len(utf8_xml)) exc_txt += ", CIM-XML snippet: %r" % utf8_xml[cpos1:cpos2] raise ParseError(exc_txt) # Check for incorrectly encoded UTF-8 sequences. # @ibm.13@ Simplified logic (removed loop). try: utf8_xml_u = utf8_xml.decode("utf-8") except UnicodeDecodeError as exc: # Only raised for incorrectly encoded UTF-8 sequences; technically # correct sequences that are ill-formed (e.g. representing surrogates) # do not cause this exception to be raised. # If more than one incorrectly encoded sequence is present, only # information about the first one is returned in the exception object. # Also, the stated reason (in _msg) is not always correct. _codec, _str, _p1, _p2, _msg = exc.args exc_txt = "Incorrectly encoded UTF-8 Byte sequences found in %s" % \ meaning exc_txt += "\n At offset %d:" % _p1 for c in utf8_xml[_p1:_p2 + 1]: exc_txt += " 0x%02X" % ord(c) cpos1 = max(_p1 - context_before, 0) cpos2 = min(_p2 + context_after, len(utf8_xml)) exc_txt += ", CIM-XML snippet: %r" % utf8_xml[cpos1:cpos2] raise ParseError(exc_txt) # Now we know the Unicode characters are valid. # Check for Unicode characters that cannot legally be represented as XML # characters. ic_list = list() last_ic_pos = -2 for m in _ILLEGAL_XML_CHARS_RE.finditer(utf8_xml_u): ic_pos = m.start(1) ic_char = m.group(1) if ic_pos > last_ic_pos + 1: ic_list.append((ic_pos, ic_char)) last_ic_pos = ic_pos if len(ic_list) > 0: exc_txt = "Invalid XML characters found in %s:" % meaning for (ic_pos, ic_char) in ic_list: cpos1 = max(ic_pos - context_before, 0) cpos2 = min(ic_pos + context_after, len(utf8_xml_u)) exc_txt += "\n At offset %d: U+%04X, CIM-XML snippet: %r" % \ (ic_pos, ord(ic_char), utf8_xml_u[cpos1:cpos2]) raise ParseError(exc_txt) return utf8_xml class Error(Exception): pass class CIMError(Error): """ Exception indicating that the WBEM server has returned an error response with a CIM status code. The exception value is a tuple of ``(error_code, description, exception_obj)``, where: * ``error_code``: the numeric CIM status code. See `cim_constants` for constants defining CIM status code values. * ``description``: a string (`unicode` or
"""Generated message classes for sqladmin version v1beta3. Creates and configures Cloud SQL instances, which provide fully-managed MySQL databases. """ # NOTE: This file is autogenerated and should not be edited by hand. from googlecloudsdk.third_party.apitools.base.protorpclite import message_types as _message_types from googlecloudsdk.third_party.apitools.base.protorpclite import messages as _messages package = 'sqladmin' class BackupConfiguration(_messages.Message): """Database instance backup configuration. Fields: binaryLogEnabled: Whether binary log is enabled. If backup configuration is disabled, binary log must be disabled as well. enabled: Whether this configuration is enabled. id: Identifier for this configuration. This gets generated automatically when a backup configuration is created. kind: This is always sql#backupConfiguration. startTime: Start time for the daily backup configuration in UTC timezone in the 24 hour format - HH:MM. """ binaryLogEnabled = _messages.BooleanField(1) enabled = _messages.BooleanField(2) id = _messages.StringField(3) kind = _messages.StringField(4, default=u'sql#backupConfiguration') startTime = _messages.StringField(5) class BackupRun(_messages.Message): """A database instance backup run resource. Fields: backupConfiguration: Backup Configuration identifier. dueTime: The due time of this run in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. endTime: The time the backup operation completed in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. enqueuedTime: The time the run was enqueued in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. error: Information about why the backup operation failed. This is only present if the run has the FAILED status. instance: Name of the database instance. kind: This is always sql#backupRun. startTime: The time the backup operation actually started in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. status: The status of this run. """ backupConfiguration = _messages.StringField(1) dueTime = _message_types.DateTimeField(2) endTime = _message_types.DateTimeField(3) enqueuedTime = _message_types.DateTimeField(4) error = _messages.MessageField('OperationError', 5) instance = _messages.StringField(6) kind = _messages.StringField(7, default=u'sql#backupRun') startTime = _message_types.DateTimeField(8) status = _messages.StringField(9) class BackupRunsListResponse(_messages.Message): """Backup run list results. Fields: items: A list of backup runs in reverse chronological order of the enqueued time. kind: This is always sql#backupRunsList. nextPageToken: The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results. """ items = _messages.MessageField('BackupRun', 1, repeated=True) kind = _messages.StringField(2, default=u'sql#backupRunsList') nextPageToken = _messages.StringField(3) class BinLogCoordinates(_messages.Message): """Binary log coordinates. Fields: binLogFileName: Name of the binary log file for a Cloud SQL instance. binLogPosition: Position (offset) within the binary log file. kind: This is always sql#binLogCoordinates. """ binLogFileName = _messages.StringField(1) binLogPosition = _messages.IntegerField(2) kind = _messages.StringField(3, default=u'sql#binLogCoordinates') class CloneContext(_messages.Message): """Database instance clone context. 
Fields: binLogCoordinates: Binary log coordinates, if specified, indentify the position up to which the source instance should be cloned. If not specified, the source instance is cloned up to the most recent binary log coordinates. destinationInstanceName: Name of the Cloud SQL instance to be created as a clone. kind: This is always sql#cloneContext. sourceInstanceName: Name of the Cloud SQL instance to be cloned. """ binLogCoordinates = _messages.MessageField('BinLogCoordinates', 1) destinationInstanceName = _messages.StringField(2) kind = _messages.StringField(3, default=u'sql#cloneContext') sourceInstanceName = _messages.StringField(4) class DatabaseFlags(_messages.Message): """MySQL flags for Cloud SQL instances. Fields: name: The name of the flag. These flags are passed at instance startup, so include both MySQL server options and MySQL system variables. Flags should be specified with underscores, not hyphens. For more information, see Configuring MySQL Flags in the Google Cloud SQL documentation, as well as the official MySQL documentation for server options and system variables. value: The value of the flag. Booleans should be set to on for true and off for false. This field must be omitted if the flag doesn't take a value. """ name = _messages.StringField(1) value = _messages.StringField(2) class DatabaseInstance(_messages.Message): """A Cloud SQL instance resource. Fields: currentDiskSize: The current disk usage of the instance in bytes. databaseVersion: The database engine type and version. Can be MYSQL_5_5 or MYSQL_5_6. Defaults to MYSQL_5_5. The databaseVersion cannot be changed after instance creation. etag: HTTP 1.1 Entity tag for the resource. instance: Name of the Cloud SQL instance. This does not include the project ID. instanceType: The instance type. This can be one of the following. CLOUD_SQL_INSTANCE: Regular Cloud SQL instance. READ_REPLICA_INSTANCE: Cloud SQL instance acting as a read-replica. ipAddresses: The assigned IP addresses for the instance. ipv6Address: The IPv6 address assigned to the instance. kind: This is always sql#instance. masterInstanceName: The name of the instance which will act as master in the replication setup. maxDiskSize: The maximum disk size of the instance in bytes. project: The project ID of the project containing the Cloud SQL instance. The Google apps domain is prefixed if applicable. region: The geographical region. Can be us-central, asia-east1 or europe- west1. Defaults to us-central. The region can not be changed after instance creation. replicaNames: The replicas of the instance. serverCaCert: SSL configuration. serviceAccountEmailAddress: The service account email address assigned to the instance. settings: The user settings. state: The current serving state of the Cloud SQL instance. This can be one of the following. RUNNABLE: The instance is running, or is ready to run when accessed. SUSPENDED: The instance is not available, for example due to problems with billing. PENDING_CREATE: The instance is being created. MAINTENANCE: The instance is down for maintenance. UNKNOWN_STATE: The state of the instance is unknown. 
""" currentDiskSize = _messages.IntegerField(1) databaseVersion = _messages.StringField(2) etag = _messages.StringField(3) instance = _messages.StringField(4) instanceType = _messages.StringField(5) ipAddresses = _messages.MessageField('IpMapping', 6, repeated=True) ipv6Address = _messages.StringField(7) kind = _messages.StringField(8, default=u'sql#instance') masterInstanceName = _messages.StringField(9) maxDiskSize = _messages.IntegerField(10) project = _messages.StringField(11) region = _messages.StringField(12) replicaNames = _messages.StringField(13, repeated=True) serverCaCert = _messages.MessageField('SslCert', 14) serviceAccountEmailAddress = _messages.StringField(15) settings = _messages.MessageField('Settings', 16) state = _messages.StringField(17) class ExportContext(_messages.Message): """Database instance export context. Fields: database: Databases (for example, guestbook) from which the export is made. If unspecified, all databases are exported. kind: This is always sql#exportContext. table: Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. uri: The path to the file in Google Cloud Storage where the export will be stored, or where it was already stored. The URI is in the form gs://bucketName/fileName. If the file already exists, the operation fails. If the filename ends with .gz, the contents are compressed. """ database = _messages.StringField(1, repeated=True) kind = _messages.StringField(2, default=u'sql#exportContext') table = _messages.StringField(3, repeated=True) uri = _messages.StringField(4) class Flag(_messages.Message): """A Google Cloud SQL service flag resource. Fields: allowedStringValues: For STRING flags, a list of strings that the value can be set to. appliesTo: The database version this flag applies to. Currently this can only be [MYSQL_5_5]. kind: This is always sql#flag. maxValue: For INTEGER flags, the maximum allowed value. minValue: For INTEGER flags, the minimum allowed value. name: This is the name of the flag. Flag names always use underscores, not hyphens, e.g. max_allowed_packet type: The type of the flag. Flags are typed to being BOOLEAN, STRING, INTEGER or NONE. NONE is used for flags which do not take a value, such as skip_grant_tables. """ allowedStringValues = _messages.StringField(1, repeated=True) appliesTo = _messages.StringField(2, repeated=True) kind = _messages.StringField(3, default=u'sql#flag') maxValue = _messages.IntegerField(4) minValue = _messages.IntegerField(5) name = _messages.StringField(6) type = _messages.StringField(7) class FlagsListResponse(_messages.Message): """Flags list response. Fields: items: List of flags. kind: This is always sql#flagsList. """ items = _messages.MessageField('Flag', 1, repeated=True) kind = _messages.StringField(2, default=u'sql#flagsList') class ImportContext(_messages.Message): """Database instance import context. Fields: database: The database (for example, guestbook) to which the import is made. If not set, it is assumed that the database is specified in the file to be imported. kind: This is always sql#importContext. uri: A path to the MySQL dump file in Google Cloud Storage from which the import is made. The URI is in the form gs://bucketName/fileName. Compressed gzip files (.gz) are also supported. 
""" database = _messages.StringField(1) kind = _messages.StringField(2, default=u'sql#importContext') uri = _messages.StringField(3, repeated=True) class InstanceOperation(_messages.Message): """An Operations resource contains information about database instance operations such as create, delete, and restart. Operations resources are created in response to operations that were initiated; you never create them directly. Fields: endTime: The time this operation finished in UTC timezone in RFC 3339 format, for example
# -*- coding: utf-8 -*- """ oauthlib.oauth2.rfc6749.grant_types ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ from __future__ import unicode_literals, absolute_import import json import logging from oauthlib import common from oauthlib.uri_validate import is_absolute_uri from .base import GrantTypeBase from .. import errors log = logging.getLogger(__name__) class AuthorizationCodeGrant(GrantTypeBase): """`Authorization Code Grant`_ The authorization code grant type is used to obtain both access tokens and refresh tokens and is optimized for confidential clients. Since this is a redirection-based flow, the client must be capable of interacting with the resource owner's user-agent (typically a web browser) and capable of receiving incoming requests (via redirection) from the authorization server:: +----------+ | Resource | | Owner | | | +----------+ ^ | (B) +----|-----+ Client Identifier +---------------+ | -+----(A)-- & Redirection URI ---->| | | User- | | Authorization | | Agent -+----(B)-- User authenticates --->| Server | | | | | | -+----(C)-- Authorization Code ---<| | +-|----|---+ +---------------+ | | ^ v (A) (C) | | | | | | ^ v | | +---------+ | | | |>---(D)-- Authorization Code ---------' | | Client | & Redirection URI | | | | | |<---(E)----- Access Token -------------------' +---------+ (w/ Optional Refresh Token) Note: The lines illustrating steps (A), (B), and (C) are broken into two parts as they pass through the user-agent. Figure 3: Authorization Code Flow The flow illustrated in Figure 3 includes the following steps: (A) The client initiates the flow by directing the resource owner's user-agent to the authorization endpoint. The client includes its client identifier, requested scope, local state, and a redirection URI to which the authorization server will send the user-agent back once access is granted (or denied). (B) The authorization server authenticates the resource owner (via the user-agent) and establishes whether the resource owner grants or denies the client's access request. (C) Assuming the resource owner grants access, the authorization server redirects the user-agent back to the client using the redirection URI provided earlier (in the request or during client registration). The redirection URI includes an authorization code and any local state provided by the client earlier. (D) The client requests an access token from the authorization server's token endpoint by including the authorization code received in the previous step. When making the request, the client authenticates with the authorization server. The client includes the redirection URI used to obtain the authorization code for verification. (E) The authorization server authenticates the client, validates the authorization code, and ensures that the redirection URI received matches the URI used to redirect the client in step (C). If valid, the authorization server responds back with an access token and, optionally, a refresh token. .. 
_`Authorization Code Grant`: http://tools.ietf.org/html/rfc6749#section-4.1 """ default_response_mode = 'query' response_types = ['code'] def create_authorization_code(self, request): """Generates an authorization grant represented as a dictionary.""" grant = {'code': common.generate_token()} if hasattr(request, 'state') and request.state: grant['state'] = request.state log.debug('Created authorization code grant %r for request %r.', grant, request) return grant def create_authorization_response(self, request, token_handler): """ The client constructs the request URI by adding the following parameters to the query component of the authorization endpoint URI using the "application/x-www-form-urlencoded" format, per `Appendix B`_: response_type REQUIRED. Value MUST be set to "code" for standard OAuth2 authorization flow. For OpenID Connect it must be one of "code token", "code id_token", or "code token id_token" - we essentially test that "code" appears in the response_type. client_id REQUIRED. The client identifier as described in `Section 2.2`_. redirect_uri OPTIONAL. As described in `Section 3.1.2`_. scope OPTIONAL. The scope of the access request as described by `Section 3.3`_. state RECOMMENDED. An opaque value used by the client to maintain state between the request and callback. The authorization server includes this value when redirecting the user-agent back to the client. The parameter SHOULD be used for preventing cross-site request forgery as described in `Section 10.12`_. The client directs the resource owner to the constructed URI using an HTTP redirection response, or by other means available to it via the user-agent. :param request: oauthlib.commong.Request :param token_handler: A token handler instace, for example of type oauthlib.oauth2.BearerToken. :returns: headers, body, status :raises: FatalClientError on invalid redirect URI or client id. ValueError if scopes are not set on the request object. A few examples:: >>> from your_validator import your_validator >>> request = Request('https://example.com/authorize?client_id=valid' ... '&redirect_uri=http%3A%2F%2Fclient.com%2F') >>> from oauthlib.common import Request >>> from oauthlib.oauth2 import AuthorizationCodeGrant, BearerToken >>> token = BearerToken(your_validator) >>> grant = AuthorizationCodeGrant(your_validator) >>> grant.create_authorization_response(request, token) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "oauthlib/oauth2/rfc6749/grant_types.py", line 513, in create_authorization_response raise ValueError('Scopes must be set on post auth.') ValueError: Scopes must be set on post auth. >>> request.scopes = ['authorized', 'in', 'some', 'form'] >>> grant.create_authorization_response(request, token) (u'http://client.com/?error=invalid_request&error_description=Missing+response_type+parameter.', None, None, 400) >>> request = Request('https://example.com/authorize?client_id=valid' ... '&redirect_uri=http%3A%2F%2Fclient.com%2F' ... 
'&response_type=code') >>> request.scopes = ['authorized', 'in', 'some', 'form'] >>> grant.create_authorization_response(request, token) (u'http://client.com/?code=u3F05aEObJuP2k7DordviIgW5wl52N', None, None, 200) >>> # If the client id or redirect uri fails validation >>> grant.create_authorization_response(request, token) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "oauthlib/oauth2/rfc6749/grant_types.py", line 515, in create_authorization_response >>> grant.create_authorization_response(request, token) File "oauthlib/oauth2/rfc6749/grant_types.py", line 591, in validate_authorization_request oauthlib.oauth2.rfc6749.errors.InvalidClientIdError .. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B .. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2 .. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2 .. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3 .. _`Section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12 """ try: # request.scopes is only mandated in post auth and both pre and # post auth use validate_authorization_request if not request.scopes: raise ValueError('Scopes must be set on post auth.') self.validate_authorization_request(request) log.debug('Pre resource owner authorization validation ok for %r.', request) # If the request fails due to a missing, invalid, or mismatching # redirection URI, or if the client identifier is missing or invalid, # the authorization server SHOULD inform the resource owner of the # error and MUST NOT automatically redirect the user-agent to the # invalid redirection URI. except errors.FatalClientError as e: log.debug('Fatal client error during validation of %r. %r.', request, e) raise # If the resource owner denies the access request or if the request # fails for reasons other than a missing or invalid redirection URI, # the authorization server informs the client by adding the following # parameters to the query component of the redirection URI using the # "application/x-www-form-urlencoded" format, per Appendix B: # http://tools.ietf.org/html/rfc6749#appendix-B except errors.OAuth2Error as e: log.debug('Client error during validation of %r. %r.', request, e) request.redirect_uri = request.redirect_uri or self.error_uri redirect_uri = common.add_params_to_uri( request.redirect_uri, e.twotuples, fragment=request.response_mode == "fragment") return {'Location': redirect_uri}, None, 302 grant = self.create_authorization_code(request) for modifier in self._code_modifiers: grant = modifier(grant, token_handler, request) log.debug('Saving grant %r for %r.', grant, request) self.request_validator.save_authorization_code( request.client_id, grant, request) return self.prepare_authorization_response( request, grant, {}, None, 302) def create_token_response(self, request, token_handler): """Validate the authorization code. The client MUST NOT use the authorization code more than once. If an authorization code is used more than once, the authorization server MUST deny the request and SHOULD revoke (when possible) all tokens previously issued based on that authorization code. The authorization code is bound to the client identifier and redirection URI. """ headers = { 'Content-Type': 'application/json', 'Cache-Control': 'no-store', 'Pragma': 'no-cache', } try: self.validate_token_request(request) log.debug('Token request validation ok for %r.', request) except errors.OAuth2Error as e: log.debug('Client error during validation of %r. 
%r.', request, e) return headers, e.json, e.status_code token = token_handler.create_token(request, refresh_token=self.refresh_token, save_token=False) for modifier in self._token_modifiers: token = modifier(token, token_handler, request) self.request_validator.save_token(token, request) self.request_validator.invalidate_authorization_code( request.client_id, request.code, request) return headers, json.dumps(token), 200 def validate_authorization_request(self, request): """Check the authorization request for normal and fatal errors. A normal error could be a missing response_type parameter or the client attempting to access scope it is not allowed to ask authorization for. Normal errors can safely be included in the redirection URI and sent back to the client. Fatal errors occur when the client_id or redirect_uri is invalid or missing. These must be caught by the provider and handled; how this is done is outside the scope of OAuthLib, but showing an error page describing the
:param certificate_authority: certificate authority's X509 certificate. :return: None .. versionadded:: 0.10 """ if not isinstance(certificate_authority, X509): raise TypeError("certificate_authority must be an X509 instance") add_result = _lib.SSL_CTX_add_client_CA( self._context, certificate_authority._x509 ) _openssl_assert(add_result == 1) def set_timeout(self, timeout): """ Set the timeout for newly created sessions for this Context object to *timeout*. The default value is 300 seconds. See the OpenSSL manual for more information (e.g. :manpage:`SSL_CTX_set_timeout(3)`). :param timeout: The timeout in (whole) seconds :return: The previous session timeout """ if not isinstance(timeout, int): raise TypeError("timeout must be an integer") return _lib.SSL_CTX_set_timeout(self._context, timeout) def get_timeout(self): """ Retrieve session timeout, as set by :meth:`set_timeout`. The default is 300 seconds. :return: The session timeout """ return _lib.SSL_CTX_get_timeout(self._context) def set_info_callback(self, callback): """ Set the information callback to *callback*. This function will be called from time to time during SSL handshakes. :param callback: The Python callback to use. This should take three arguments: a Connection object and two integers. The first integer specifies where in the SSL handshake the function was called, and the other the return code from a (possibly failed) internal function call. :return: None """ @wraps(callback) def wrapper(ssl, where, return_code): callback(Connection._reverse_mapping[ssl], where, return_code) self._info_callback = _ffi.callback( "void (*)(const SSL *, int, int)", wrapper ) _lib.SSL_CTX_set_info_callback(self._context, self._info_callback) @_requires_keylog def set_keylog_callback(self, callback): """ Set the TLS key logging callback to *callback*. This function will be called whenever TLS key material is generated or received, in order to allow applications to store this keying material for debugging purposes. :param callback: The Python callback to use. This should take two arguments: a Connection object and a bytestring that contains the key material in the format used by NSS for its SSLKEYLOGFILE debugging output. :return: None """ @wraps(callback) def wrapper(ssl, line): line = _ffi.string(line) callback(Connection._reverse_mapping[ssl], line) self._keylog_callback = _ffi.callback( "void (*)(const SSL *, const char *)", wrapper ) _lib.SSL_CTX_set_keylog_callback(self._context, self._keylog_callback) def get_app_data(self): """ Get the application data (supplied via :meth:`set_app_data()`) :return: The application data """ return self._app_data def set_app_data(self, data): """ Set the application data (will be returned from get_app_data()) :param data: Any Python object :return: None """ self._app_data = data def get_cert_store(self): """ Get the certificate store for the context. This can be used to add "trusted" certificates without using the :meth:`load_verify_locations` method. :return: A X509Store object or None if it does not have one. """ store = _lib.SSL_CTX_get_cert_store(self._context) if store == _ffi.NULL: # TODO: This is untested. return None pystore = X509Store.__new__(X509Store) pystore._store = store return pystore def set_options(self, options): """ Add options. Options set before are not cleared! This method should be used with the :const:`OP_*` constants. :param options: The options to add. :return: The new option bitmask. 
""" if not isinstance(options, int): raise TypeError("options must be an integer") return _lib.SSL_CTX_set_options(self._context, options) def set_mode(self, mode): """ Add modes via bitmask. Modes set before are not cleared! This method should be used with the :const:`MODE_*` constants. :param mode: The mode to add. :return: The new mode bitmask. """ if not isinstance(mode, int): raise TypeError("mode must be an integer") return _lib.SSL_CTX_set_mode(self._context, mode) def set_tlsext_servername_callback(self, callback): """ Specify a callback function to be called when clients specify a server name. :param callback: The callback function. It will be invoked with one argument, the Connection instance. .. versionadded:: 0.13 """ @wraps(callback) def wrapper(ssl, alert, arg): callback(Connection._reverse_mapping[ssl]) return 0 self._tlsext_servername_callback = _ffi.callback( "int (*)(SSL *, int *, void *)", wrapper ) _lib.SSL_CTX_set_tlsext_servername_callback( self._context, self._tlsext_servername_callback ) def set_tlsext_use_srtp(self, profiles): """ Enable support for negotiating SRTP keying material. :param bytes profiles: A colon delimited list of protection profile names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``. :return: None """ if not isinstance(profiles, bytes): raise TypeError("profiles must be a byte string.") _openssl_assert( _lib.SSL_CTX_set_tlsext_use_srtp(self._context, profiles) == 0 ) @_requires_alpn def set_alpn_protos(self, protos): """ Specify the protocols that the client is prepared to speak after the TLS connection has been negotiated using Application Layer Protocol Negotiation. :param protos: A list of the protocols to be offered to the server. This list should be a Python list of bytestrings representing the protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``. """ # Different versions of OpenSSL are inconsistent about how they handle # empty proto lists (see #1043), so we avoid the problem entirely by # rejecting them ourselves. if not protos: raise ValueError("at least one protocol must be specified") # Take the list of protocols and join them together, prefixing them # with their lengths. protostr = b"".join( chain.from_iterable((bytes((len(p),)), p) for p in protos) ) # Build a C string from the list. We don't need to save this off # because OpenSSL immediately copies the data out. input_str = _ffi.new("unsigned char[]", protostr) # https://www.openssl.org/docs/man1.1.0/man3/SSL_CTX_set_alpn_protos.html: # SSL_CTX_set_alpn_protos() and SSL_set_alpn_protos() # return 0 on success, and non-0 on failure. # WARNING: these functions reverse the return value convention. _openssl_assert( _lib.SSL_CTX_set_alpn_protos( self._context, input_str, len(protostr) ) == 0 ) @_requires_alpn def set_alpn_select_callback(self, callback): """ Specify a callback function that will be called on the server when a client offers protocols using ALPN. :param callback: The callback function. It will be invoked with two arguments: the Connection, and a list of offered protocols as bytestrings, e.g ``[b'http/1.1', b'spdy/2']``. It can return one of those bytestrings to indicate the chosen protocol, the empty bytestring to terminate the TLS connection, or the :py:obj:`NO_OVERLAPPING_PROTOCOLS` to indicate that no offered protocol was selected, but that the connection should not be aborted. 
""" self._alpn_select_helper = _ALPNSelectHelper(callback) self._alpn_select_callback = self._alpn_select_helper.callback _lib.SSL_CTX_set_alpn_select_cb( self._context, self._alpn_select_callback, _ffi.NULL ) def _set_ocsp_callback(self, helper, data): """ This internal helper does the common work for ``set_ocsp_server_callback`` and ``set_ocsp_client_callback``, which is almost all of it. """ self._ocsp_helper = helper self._ocsp_callback = helper.callback if data is None: self._ocsp_data = _ffi.NULL else: self._ocsp_data = _ffi.new_handle(data) rc = _lib.SSL_CTX_set_tlsext_status_cb( self._context, self._ocsp_callback ) _openssl_assert(rc == 1) rc = _lib.SSL_CTX_set_tlsext_status_arg(self._context, self._ocsp_data) _openssl_assert(rc == 1) def set_ocsp_server_callback(self, callback, data=None): """ Set a callback to provide OCSP data to be stapled to the TLS handshake on the server side. :param callback: The callback function. It will be invoked with two arguments: the Connection, and the optional arbitrary data you have provided. The callback must return a bytestring that contains the OCSP data to staple to the handshake. If no OCSP data is available for this connection, return the empty bytestring. :param data: Some opaque data that will be passed into the callback function when called. This can be used to avoid needing to do complex data lookups or to keep track of what context is being used. This parameter is optional. """ helper = _OCSPServerCallbackHelper(callback) self._set_ocsp_callback(helper, data) def set_ocsp_client_callback(self, callback, data=None): """ Set a callback to validate OCSP data stapled to the TLS handshake on the client side. :param callback: The callback function. It will be invoked with three arguments: the Connection, a bytestring containing the stapled OCSP assertion, and the optional arbitrary data you have provided. The callback must return a boolean that indicates the result of validating the OCSP data: ``True`` if the OCSP data is valid and the certificate can be trusted, or ``False`` if either the OCSP data is invalid or the certificate has been revoked. :param data: Some opaque data that will be passed into the callback function when called. This can be used to avoid needing to do complex data lookups or to keep track of what context is being used. This parameter is optional. """ helper = _OCSPClientCallbackHelper(callback) self._set_ocsp_callback(helper, data) def set_cookie_generate_callback(self, callback): self._cookie_generate_helper = _CookieGenerateCallbackHelper(callback) _lib.SSL_CTX_set_cookie_generate_cb( self._context, self._cookie_generate_helper.callback, ) def set_cookie_verify_callback(self, callback): self._cookie_verify_helper = _CookieVerifyCallbackHelper(callback) _lib.SSL_CTX_set_cookie_verify_cb( self._context, self._cookie_verify_helper.callback, ) class Connection: _reverse_mapping = WeakValueDictionary() def __init__(self, context, socket=None): """ Create a new Connection object, using the given OpenSSL.SSL.Context instance and socket. :param context: An SSL Context to use for this connection :param socket: The socket to use for transport layer """ if not isinstance(context, Context): raise TypeError("context must be a Context instance") ssl = _lib.SSL_new(context._context) self._ssl = _ffi.gc(ssl, _lib.SSL_free) # We set
<reponame>Mvmo/acton # Copyright (C) 2019-2021 Data Ductus AB # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Python version of bytesview ################################################## ### Bytesview class Bytesview(): _bytes : bytes = None _pre = None _start : int = 0 _end : int = 0 _eof : bool = False def __init__(self, b, n=None, start=None, pre=None, closed=False): self._bytes = b self._pre = pre self._start = 0 if start is None else start if n is not None: self._end = self._start + n else: self._end = len(b)-self._start self._eof = closed ### analysis of Bytesview objects def n(self): return self._end - self._start + (self._pre.n() if self._pre is not None else 0) def to_bytes(self): these = self._bytes[self._start:self._end] if self._pre is None: return these else: return self._pre.to_bytes() + these ### de-lousing def show(self): if self._pre is not None: self._pre.show() print(self._bytes, "[{}:{}]".format(self._start,self._end)) if self._eof: print("_eof = {}".format(self._eof)) ### consuming data def _consume(self, amount): if self._pre is not None: (pre, remaining) = self._pre._consume(amount) else: pre = None remaining = amount thislen = self._end - self._start if remaining < thislen: newthis = Bytesview(self._bytes, n=thislen-remaining, start=self._start+remaining, pre=pre) newremaining = 0 else: newthis = pre newremaining = remaining - thislen return(newthis,newremaining) def consume(self, amount): # assert(self.n()>=amount) (newview, remaining) = self._consume(amount) if remaining > 0: # raise IndexError return None if newview is not None: return newview else: return Bytesview(b'') # def _initialbytes(self, amount=a, end=-1, endskip=0): # # # def initialbytes(self, end=-1, endskip=0): # # remove data at end of bytesview. # # # # end == -1 implies trimming from eof (so error if -1 and not closed) # # end >= 0 implies end-endskip is the index where returned bytesview object ends. # # remove endskip bytes before end. 
# if end == -1 and not self._eof: # raise ValueError("Trim(end=-1) on non-closed Bytesview") # # if self._pre is None: # # thislen = self._end - self._start # thislen -= # if self._pre is None: # newstartskip = startskip # newend = end # newenskip = endskip # return Bytesview(self._bytes, start=self._start+ # else: # # if start is None: # if end is None or end == -1: # these = self._bytes[self._start:self._end] # else: # these = self._bytes[self._start:self._start+end] # else: # if end is None or end == -1: # these = self._bytes[self._start+start:self._end] # else: # these = self._bytes[self._start+start:self._start+end] # if self._pre is None: # return these # else: # return self._pre.to_bytes() + these ### reading out data from a Bytesview object (creating a new) ### ### Raises IncompleteReadError if Bytesview object does not contain ### enough data to fulfill request. def _read(self, n): # n is always >= 0 if self._pre is not None: (remaining, initial) = self._pre._read(n) else: remaining = n initial = None thislen = self._end - self._start if remaining == 0: return (remaining,initial) elif remaining >= thislen: return (remaining-thislen, self) else: # 0 < remaining < thislen return (0, Bytesview(self._bytes, start=self._start, n=remaining, pre=initial)) def read(self, n=-1): # (int) -> (bytes, Bytesview) if n == -1: if not self._eof: raise IncompleteReadError("read(-1) on non-closed Bytesview") return None else: return self else: # deliver n bytes, if there are n bytes. # if not, deliver available bytes if closed, error if not closed thislen = self._end - self._start if self._pre is not None: (remaining, initial) = self._pre._read(n) else: remaining = n initial = None if remaining == 0: return initial if initial is not None else Bytesview(b'') elif remaining < thislen: return Bytesview(self._bytes, n=remaining, start=self._start, pre=initial) elif remaining > thislen: if self._eof: return self else: #raise IncompleteReadError("read(n) on non-closed Bytesview with less than n bytes") return None else: # remaining == thislen return self def readline(self): return self.readuntil (separator=b'\n') def _readexactly(self, n): thislen = self._end - self._start if self._pre: (initial, remains) = self._pre._readexactly(n) else: (initial, remains) = (None, n) if remains == 0: return (initial, remains) elif remains >= thislen: return (self, remains-thislen) else: # remains < thislen return (Bytesview(self._bytes, start=self._start, n=remains, pre=initial), 0) def readexactly(self, n): (initial, remains) = self._readexactly(n) if remains > 0: #raise IncompleteReadError("readexactly(): Too few bytes available") return None if initial is None: initial = Bytesview(b'') return initial def _readuntil(self, separator, seplen): # len(last) < len(separator) if self._pre is not None: (last, initial, found) = self._pre._readuntil(separator, seplen) else: (last, initial, found) = (b'', None, False) # last is potential beginning of separator at end of previous Bytesview(s) if found: return (b'',initial, found) else: buf = last+self._bytes[self._start:self._start+seplen-1] idx = buf.find(separator) if idx >= 0: # found it! # end of separator at ix+seplen-len(last) #print (buf, len(buf), last, len(last), sep, idx) return (b'', Bytesview(self._bytes, n=idx+seplen-len(last), start=self._start, pre=self._pre), True) idx = self._bytes.find(separator, self._start) if idx >= 0: # found it! 
return (b'', Bytesview(self._bytes, n=idx-self._start+seplen, start=self._start, pre=self._pre), True) thislen = self._end-self._start if thislen >= seplen-1: last = self._bytes[self._end-(seplen-1):self._end] else: last = last[thislen-(seplen-1):] + self._bytes[self._start:self._end] return (last, self, False) def readuntil(self, separator=b'\n'): seplen = len(separator) (_, initial, found) = self._readuntil(separator, seplen) if found: return initial if initial is not None else Bytesview(b'') else: if self._eof: #raise IncompleteReadError("Separator not found and Bytesview closed") return None else: #raise IncompleteReadError("Separator not found") return None def at_eof(self): return self._eof and self._pre is None and self._start == self._end ### feeding data & closing def append(self, b, n=None): if self._eof: #raise ValueError("append() on closed Bytesview") return None if self._start < self._end: return Bytesview(b, n=n, pre=self) else: return Bytesview(b, n=n) def write(self, b, n=None): # same as append(self, b, n=None) if self._eof: #raise ValueError("write() on closed Bytesview") return None return self.append(b, n) def writelines(self,data): if self._eof: #raise ValueError("writelines() on closed Bytesview") return None for b in data: self.append(b) def close(self): if self._eof: return self return Bytesview(self._bytes, n=self._end-self._start, start=self._start, pre=self._pre, closed=True) ################################################## ### Unit tests def report(bv, name): print (name+" = ", bv) print (name+".n() = ", bv.n()) # print (name+"limits() = ", bv.limits()) print (name+".to_bytes() = ", bv.to_bytes()) print (name+".show():") bv.show() by = b'0123456789abcdef' #l = len(by) #print("by = ",by) #print("l = ", l) # #bv1 = Bytesview(by) #bv2 = Bytesview(by, n=16) # #report(bv1, "bv1") #report(bv2, "bv2") bv3 = bv2.append(by, n=16) #report(bv2, "bv2") #report(bv3, "bv3") bv4 = bv3.append(by) #report(bv4,"bv4") #bv = bv4 #n=0 #while True: # res = bv.consume(n) # print("") # res.show() # #print(res.to_bytes()) # n += 1 ########### ### read() # #bv = bv4 #n=0 #while True: # res = bv.read(n) # print("") # res.show() # #print(res.to_bytes()) # n += 1 #print("read() test") #bv = bv4 #n=0 #while n < 60: # res = bv.read(n) # print(res.to_bytes()) # n += 1 #bv = bv4.close() #print("bv length: ", bv.n()) # #n=0 #while n < 60: # res = bv.read(n) # print(res.to_bytes()) # n += 1 #bv = bv4.close() #print("bv length: ", bv.n()) # #n=0 #while n < 60: # res = bv.read(n) # print(res.to_bytes()) # res.show() # n += 1 ########### ### readuntil() # #separators = by+b'x'+by+b'y'+by+b'z'+by+b'w' #bv = Bytesview(by+b'x').append(by+b'y').append(by+b'z').append(by) #bv.close() #print("bv length: ", bv.n()) # #n=0 #while n < 4*16+3+5: # sep = separators[n:n+7] # res = bv.readuntil(separator=sep) # print(sep, " ", res.to_bytes()) # res.show() # n += 1 def bv_list(bv=None): bvl = bv if bv is not None else Bytesview(b'') n = 0
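# Hedged usage sketch for the Bytesview chain above (the HTTP-ish byte values
# and variable names are illustrative; only methods defined above are used).
# Each call returns a new view, so the original 'chunks' stays usable.
chunks = Bytesview(b'GET /index.html HT').append(b'TP/1.1\r\nHost: x\r\n').close()
line = chunks.readuntil(b'\r\n')      # view up to and including the first b'\r\n'
print(line.to_bytes())                # b'GET /index.html HTTP/1.1\r\n'
rest = chunks.consume(line.n())       # drop the request line from the front
print(rest.to_bytes())                # b'Host: x\r\n'
print(chunks.at_eof())                # False: closed, but unread bytes remain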
0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush) brush = QtGui.QBrush(QtGui.QColor(85, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush) brush = QtGui.QBrush(QtGui.QColor(113, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(170, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush) brush = QtGui.QBrush(QtGui.QColor(212, 127, 127)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 220)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(170, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush) brush = QtGui.QBrush(QtGui.QColor(212, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush) brush = QtGui.QBrush(QtGui.QColor(85, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush) brush = QtGui.QBrush(QtGui.QColor(113, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(170, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) 
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush) brush = QtGui.QBrush(QtGui.QColor(212, 127, 127)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 220)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush) brush = QtGui.QBrush(QtGui.QColor(85, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(170, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush) brush = QtGui.QBrush(QtGui.QColor(212, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush) brush = QtGui.QBrush(QtGui.QColor(85, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush) brush = QtGui.QBrush(QtGui.QColor(113, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush) brush = QtGui.QBrush(QtGui.QColor(85, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush) brush = QtGui.QBrush(QtGui.QColor(85, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(170, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(170, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush) brush = QtGui.QBrush(QtGui.QColor(170, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 220)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush) self.experiment_duration_label.setPalette(palette) font = QtGui.QFont() font.setFamily("Arial") font.setBold(True) font.setWeight(75) self.experiment_duration_label.setFont(font) self.experiment_duration_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu) self.experiment_duration_label.setAcceptDrops(False) self.experiment_duration_label.setAutoFillBackground(True) self.experiment_duration_label.setFrameShape(QtWidgets.QFrame.Box) 
self.experiment_duration_label.setFrameShadow(QtWidgets.QFrame.Sunken) self.experiment_duration_label.setLineWidth(1) self.experiment_duration_label.setMidLineWidth(1) self.experiment_duration_label.setScaledContents(False) self.experiment_duration_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse) self.experiment_duration_label.setObjectName("experiment_duration_label") self.v_vs_label_1 = QtWidgets.QLineEdit(self.centralwidget) self.v_vs_label_1.setGeometry(QtCore.QRect(260, 300, 41, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush) self.v_vs_label_1.setPalette(palette) font = QtGui.QFont() font.setFamily("Arial") font.setBold(True) font.setWeight(75) self.v_vs_label_1.setFont(font) self.v_vs_label_1.setStatusTip("") self.v_vs_label_1.setStyleSheet("background-color: rgb(240, 240, 240);") self.v_vs_label_1.setFrame(False) self.v_vs_label_1.setObjectName("v_vs_label_1") self.v_vs_label_2 = QtWidgets.QLineEdit(self.centralwidget) self.v_vs_label_2.setGeometry(QtCore.QRect(260, 340, 41, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, 
QtGui.QPalette.Shadow, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush) self.v_vs_label_2.setPalette(palette) font = QtGui.QFont() font.setFamily("Arial") font.setBold(True) font.setWeight(75) self.v_vs_label_2.setFont(font) self.v_vs_label_2.setStatusTip("") self.v_vs_label_2.setStyleSheet("background-color: rgb(240, 240, 240);") self.v_vs_label_2.setFrame(False) self.v_vs_label_2.setObjectName("v_vs_label_2") self.experiment_rest_time = QtWidgets.QLineEdit(self.centralwidget) self.experiment_rest_time.setGeometry(QtCore.QRect(190, 220, 61, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush) self.experiment_rest_time.setPalette(palette) font = QtGui.QFont() font.setFamily("Arial") font.setBold(True) font.setWeight(75) self.experiment_rest_time.setFont(font) self.experiment_rest_time.setText("") self.experiment_rest_time.setFrame(True) self.experiment_rest_time.setAlignment(QtCore.Qt.AlignCenter) self.experiment_rest_time.setObjectName("experiment_rest_time") self.advanced_parameters_button = QtWidgets.QPushButton(self.centralwidget) self.advanced_parameters_button.setGeometry(QtCore.QRect(400, 430, 201, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) 
brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(91, 166, 232)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush) brush = QtGui.QBrush(QtGui.QColor(247, 217, 21)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Link, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.NoRole, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(248, 221, 23)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush) brush = QtGui.QBrush(QtGui.QColor(247, 217, 21)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Link, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush) brush = 
QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.NoRole, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 120, 215)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush) brush = QtGui.QBrush(QtGui.QColor(247, 217, 21)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Link, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.NoRole, brush) font = QtGui.QFont() font.setFamily("Arial") font.setBold(True) font.setWeight(75) self.advanced_parameters_button.setFont(font) self.advanced_parameters_button.setContextMenuPolicy(QtCore.Qt.PreventContextMenu) self.advanced_parameters_button.setAcceptDrops(False) self.advanced_parameters_button.setWhatsThis("") self.advanced_parameters_button.setAutoFillBackground(True) self.advanced_parameters_button.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates)) self.advanced_parameters_button.setInputMethodHints(QtCore.Qt.ImhNone) self.advanced_parameters_button.setAutoRepeatDelay(301) self.advanced_parameters_button.setAutoRepeatInterval(96) self.advanced_parameters_button.setAutoDefault(False) self.advanced_parameters_button.setDefault(False) self.advanced_parameters_button.setFlat(False) self.advanced_parameters_button.setObjectName("advanced_parameters_button") self.start_voltage_label = QtWidgets.QLabel(self.centralwidget) self.start_voltage_label.setEnabled(True) self.start_voltage_label.setGeometry(QtCore.QRect(10, 300, 161, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) 
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(67, 121, 171)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush) brush = QtGui.QBrush(QtGui.QColor(84, 151, 213)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush) brush = QtGui.QBrush(QtGui.QColor(33, 60, 85)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush) brush = QtGui.QBrush(QtGui.QColor(44, 80, 114)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(67, 121, 171)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush) brush = QtGui.QBrush(QtGui.QColor(161, 188, 213)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 220)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(67, 121, 171)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush
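# Hedged helper sketch: the generated Qt Designer code above repeats the same
# QBrush/SolidPattern/setBrush pattern for every palette role; a small loop can
# express the same palette more compactly. (Assumptions: PyQt5 is the binding
# in use, and 'make_palette' is an illustrative name, not part of the module.)
from PyQt5 import QtCore, QtGui

def make_palette(role_colors):
    # role_colors maps (color group, color role) -> (r, g, b)
    palette = QtGui.QPalette()
    for (group, role), rgb in role_colors.items():
        brush = QtGui.QBrush(QtGui.QColor(*rgb))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(group, role, brush)
    return palette

# e.g. a few of the label colours set above:
# label_palette = make_palette({
#     (QtGui.QPalette.Active,   QtGui.QPalette.Window): (170, 0, 0),
#     (QtGui.QPalette.Inactive, QtGui.QPalette.Window): (170, 0, 0),
#     (QtGui.QPalette.Active,   QtGui.QPalette.WindowText): (255, 255, 255),
# })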
<rdf:li s\ tEvt:action=\x22sav\ ed\x22 stEvt:instan\ ceID=\x22xmp.iid:ca\ cc91ae-fbba-df42\ -baea-899bdb2018\ e0\x22 stEvt:when=\x22\ 2020-05-02T17:59\ :52-03:00\x22 stEvt\ :softwareAgent=\x22\ Adobe Photoshop \ 21.0 (Windows)\x22 \ stEvt:changed=\x22/\ \x22/> </rdf:Seq> <\ /xmpMM:History> \ </rdf:Descriptio\ n> </rdf:RDF> </\ x:xmpmeta> <?xpa\ cket end=\x22r\x22?>\x92u\ \x1ek\x00\x00\x01\x02IDAT8\xcb\x9d\xd3\xbfJ\ \x03A\x10\xc7\xf1\xcd]\x04\xf1\x1f\x01\xedD\xdf\xc0.\ \xea;\x08\x16\xb6\x96\xb6\x96\xb6\x82H\xacRj#\xa2\ \xadX\x88\xa8\x85\x95MJQ\x0b\x15A\xac\xac>\xaf\ `%\xb1\xd9\x83\xe5 \xe7\x9d\xc5\x17ffgvw\ ~\xb3\x1b\x10\x1a\xb0\x86\xe9hg\x08\x01\xad\xe8T\x91\ \xc7\xa2K\xdc\x17\xc5\xc8B\xc3\x1b\x04\x5c\xe0\xba\xf0\x03\ 6\xd0\xc7\x1e\xf6+\xe8a\x17\x07\xf8\xc1\x0d\xc6\x03\xb6\ p\x8e\x13\x9cVp\x169\xc27^\xd1\xf9O\x0b=\ <\xa0\x93\x8a\x98G\xda\x89hy)\x1ep\x8c\x0f\xcc\ \x96El%'\xe4\x15\xa7oc>\xcd+\x17\x1f\xc6\ \xa4\xbf6\xca\xd2)\xa4\xc5\xefXh\xa2Ia\xf4\xf1\ \x89\xc5\xe8\xcf`\x12S#(\xd6\xda\x01\x9b\x18\xc6\xb1\ \x0c\xf0\x82'<W\xf0\x887\xac\x07L\xc4'\xfa\x85\ U,a\x05],\x8f\xa0\x1bs\xe6\xd2~\xaep[\ \x12\xb5\x96\x06Y\x12\xb8\xc3N\xb4\xc7j|\xb2P~\ \x07Y\x14\xb0\xf6\x0d~\x01\xf6\xa4\xe1\xe7\x0b\x84\x86*\ \x00\x00\x00\x00IEND\xaeB`\x82\ \x00\x00\x07\xb1\ \x89\ PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ \x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\ \x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ \x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\ :com.adobe.xmp\x00\x00\ \x00\x00\x00<?xpacket beg\ in=\x22\xef\xbb\xbf\x22 id=\x22W5M\ 0MpCehiHzreSzNTc\ zkc9d\x22?> <x:xmpm\ eta xmlns:x=\x22ado\ be:ns:meta/\x22 x:x\ mptk=\x22Adobe XMP \ Core 5.6-c148 79\ .164036, 2019/08\ /13-01:06:57 \ \x22> <rdf:RDF \ xmlns:rdf=\x22http:\ //www.w3.org/199\ 9/02/22-rdf-synt\ ax-ns#\x22> <rdf:De\ scription rdf:ab\ out=\x22\x22 xmlns:xmp\ =\x22http://ns.adob\ e.com/xap/1.0/\x22 \ xmlns:dc=\x22http:/\ /purl.org/dc/ele\ ments/1.1/\x22 xmln\ s:photoshop=\x22htt\ p://ns.adobe.com\ /photoshop/1.0/\x22\ xmlns:xmpMM=\x22ht\ tp://ns.adobe.co\ m/xap/1.0/mm/\x22 x\ mlns:stEvt=\x22http\ ://ns.adobe.com/\ xap/1.0/sType/Re\ sourceEvent#\x22 xm\ p:CreatorTool=\x22A\ dobe Photoshop 2\ 1.0 (Windows)\x22 x\ mp:CreateDate=\x222\ 020-03-03T09:50:\ 39-03:00\x22 xmp:Mo\ difyDate=\x222020-0\ 5-02T17:58:40-03\ :00\x22 xmp:Metadat\ aDate=\x222020-05-0\ 2T17:58:40-03:00\ \x22 dc:format=\x22ima\ ge/png\x22 photosho\ p:ColorMode=\x223\x22 \ photoshop:ICCPro\ file=\x22sRGB IEC61\ 966-2.1\x22 xmpMM:I\ nstanceID=\x22xmp.i\ id:24425a1a-91e5\ -6a48-a8b9-eb4d8\ 3a69869\x22 xmpMM:D\ ocumentID=\x22adobe\ :docid:photoshop\ :d0eb71d8-27bd-6\ 14f-ab1f-2021ad1\ 1a98e\x22 xmpMM:Ori\ ginalDocumentID=\ \x22xmp.did:3a5f818\ 6-ddb4-3345-b349\ -698e07db3090\x22> \ <xmpMM:History> \ <rdf:Seq> <rdf:l\ i stEvt:action=\x22\ created\x22 stEvt:i\ nstanceID=\x22xmp.i\ id:3a5f8186-ddb4\ -3345-b349-698e0\ 7db3090\x22 stEvt:w\ hen=\x222020-03-03T\ 09:50:39-03:00\x22 \ stEvt:softwareAg\ ent=\x22Adobe Photo\ shop 21.0 (Windo\ ws)\x22/> <rdf:li s\ tEvt:action=\x22sav\ ed\x22 stEvt:instan\ ceID=\x22xmp.iid:24\ 425a1a-91e5-6a48\ -a8b9-eb4d83a698\ 69\x22 stEvt:when=\x22\ 2020-05-02T17:58\ :40-03:00\x22 stEvt\ :softwareAgent=\x22\ Adobe Photoshop \ 21.0 (Windows)\x22 \ stEvt:changed=\x22/\ \x22/> </rdf:Seq> <\ /xmpMM:History> \ </rdf:Descriptio\ n> </rdf:RDF> </\ x:xmpmeta> <?xpa\ cket end=\x22r\x22?>\x962\ O\xc4\x00\x00\x01fIDAT8\xcb\x95\xd3?(\ \x85Q\x18\xc7\xf1\xd7\xbd\x97R&e0Q\x942(\ \x92,\xca\x86\xc5L\xc6\x1b\xba\x85{\xfd\x1b\x0c,\x16\ 
%\x8bl\x84A\xd7\xdf\xb0H\xc9\xa2d\x90\x08\x83(\ \x7f\xa2n&\xa5\xec|\xcf\xf5;\xf5\xf4\xf6\x1a\xdc\xfa\ \xf4\x9es\xde\xe7>\xef9\xcf9'\xc8\xe5rAH\ B\xcf\x22L\xa1\xcb\xbc\x8b\x87\xe3m\xa7@\x5c\xbb\x1e\ \x978\xc5\x0d\xd6P\x1c\x95$*s\x06\xef\x98P\xbf\ \x04\xdbx@\xab\x89\x8b\xf9\x041\x0d\x94\xe1\x00\xb7h\ \xb4A\x92\xcc\xfd\xfe\xa6m\x12\xdfh\xc3\x1b\x16\xcdl\ \x12fi>Q5\xceq\x82\x0a?\x037\xe5/\xb4\ \x87\xa7\x17b\x979\x87\x0f4\xb8\xce:\xbe1\x10\x11\ \x1c7\xc9\xfcX\xa9\x8a\xea\xfe\x93\x0c\xd4\x99\xc11\x8e\ T\xb4\xe0\x0f\xcdx\xc6\x02v1\x14\xa8\xc2=\x0ap\ \xb3yD\x9d\xce\xc1<\x06\xf5\xae\x17\x9f\xe87\xb1\xc3\ \xae\xb1\x89\x94\xf9JJ5\xb9\xc3!\xce\xf0\x8a\x17\xd4\ \x988\xf7\xe1\xb4kl)\xbbk\x97c\x09\xf7\xe8\xd6\ X!f\x950m\x0e\x9bK\x90\xf1\x09\xdc\x1ewj\ +\xb3*T\xb8\xf2M\xb8\xc2\x05jU\xbb\xfc\x12\x96\ \xb5\xb6ktD\xdc\x89\x98i;\x93x\xd26\xe6w\ !\xabY\x04\xffP\xa9\xa3\xdd\xe7:\x1b\xba0\xab\xd8\ \xd7\xdav\x22\xf8\xf1=\xac\xe8\xbe\xa4\x5c\x82\x16\x8cc\ \x14#z\xfe\xc5\xbf\x1f\xd3\xc1\xab\xfa\x01\xec\x9d\x02\x0a\ *RH\x9c\x00\x00\x00\x00IEND\xaeB`\x82\ \ \x00\x00\x07\xb6\ \x89\ PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ \x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\ \x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ \x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\ :com.adobe.xmp\x00\x00\ \x00\x00\x00<?xpacket beg\ in=\x22\xef\xbb\xbf\x22 id=\x22W5M\ 0MpCehiHzreSzNTc\ zkc9d\x22?> <x:xmpm\ eta xmlns:x=\x22ado\ be:ns:meta/\x22 x:x\ mptk=\x22Adobe XMP \ Core 5.6-c148 79\ .164036, 2019/08\ /13-01:06:57 \ \x22> <rdf:RDF \ xmlns:rdf=\x22http:\ //www.w3.org/199\ 9/02/22-rdf-synt\ ax-ns#\x22> <rdf:De\ scription rdf:ab\ out=\x22\x22 xmlns:xmp\ =\x22http://ns.adob\ e.com/xap/1.0/\x22 \ xmlns:dc=\x22http:/\ /purl.org/dc/ele\ ments/1.1/\x22 xmln\ s:photoshop=\x22htt\ p://ns.adobe.com\ /photoshop/1.0/\x22\ xmlns:xmpMM=\x22ht\ tp://ns.adobe.co\ m/xap/1.0/mm/\x22 x\ mlns:stEvt=\x22http\ ://ns.adobe.com/\ xap/1.0/sType/Re\ sourceEvent#\x22 xm\ p:CreatorTool=\x22A\ dobe Photoshop 2\ 1.0 (Windows)\x22 x\ mp:CreateDate=\x222\ 020-03-03T09:50:\ 41-03:00\x22 xmp:Mo\ difyDate=\x222020-0\ 5-02T17:59:27-03\ :00\x22 xmp:Metadat\ aDate=\x222020-05-0\ 2T17:59:27-03:00\ \x22 dc:format=\x22ima\ ge/png\x22 photosho\ p:ColorMode=\x223\x22 \ photoshop:ICCPro\ file=\x22sRGB IEC61\ 966-2.1\x22 xmpMM:I\ nstanceID=\x22xmp.i\ id:8444d4fb-1cd2\ -e94a-b082-e3d90\ 0b05953\x22 xmpMM:D\ ocumentID=\x22adobe\ :docid:photoshop\ :221f40b1-0fe0-c\ e43-a15c-2e2a24c\ bd3ca\x22 xmpMM:Ori\ ginalDocumentID=\ \x22xmp.did:897c42f\ b-aaaa-474f-b4ec\ -91cdb84c1820\x22> \ <xmpMM:History> \ <rdf:Seq> <rdf:l\ i stEvt:action=\x22\ created\x22 stEvt:i\ nstanceID=\x22xmp.i\ id:897c42fb-aaaa\ -474f-b4ec-91cdb\ 84c1820\x22 stEvt:w\ hen=\x222020-03-03T\ 09:50:41-03:00\x22 \ stEvt:softwareAg\ ent=\x22Adobe Photo\ shop 21.0 (Windo\ ws)\x22/> <rdf:li s\ tEvt:action=\x22sav\ ed\x22 stEvt:instan\ ceID=\x22xmp.iid:84\ 44d4fb-1cd2-e94a\ -b082-e3d900b059\ 53\x22 stEvt:when=\x22\ 2020-05-02T17:59\ :27-03:00\x22 stEvt\ :softwareAgent=\x22\ Adobe Photoshop \ 21.0 (Windows)\x22 \ stEvt:changed=\x22/\ \x22/> </rdf:Seq> <\ /xmpMM:History> \ </rdf:Descriptio\ n> </rdf:RDF> </\ x:xmpmeta> <?xpa\ cket end=\x22r\x22?>us\ \x22B\x00\x00\x01kIDAT8\xcbu\xd3\xbb+\ \xc5a\x1c\xc7\xf1\xe3\xe78.\x0ba0\x18\x18\xc8\xad\ \x94\x14\x8b\x92\x8c\x16\x93\xc9mpM\x06\xe7,\x06\x92\ \xc9@\xa7,.\x93r+\x97$\x8b\xdb`\xa2\xe4R\ \x06J\x91\x94\x93\xc8?`\xf2~\xea\xf3\xabo\x8f\xce\ \xf0:\xbf\xef\xf3;\xcf\xf3\xfd}\x9f[$\x95JE\ $\xc3\xc4V'\xd6\xb0\x85\x1e\xff\x7f\x7fp>fq\ 
\x8cETbEq\x1co\x98\xb7c\xec\xe0\x02\x5c\xe1\ \x04CX\xc7\x0b\xca\xcc\x17K\xf1\x81\xfa0\x89\xfb\x89\ \xaa1\x83\x0b\xaf\xc4el+\xce\xd2\xf3\x10\xfd\x8a\x03\ [\xc1.F\x15\xe7\xe9\xd9\x8c[d\x9b\xa4\xae\xdf`\ \xba\x04\x03\x8a\xc3\xaa\x1aq\x87\x1c\x93`GS\xfc\x97\ `\xcf$\xc8\xd4\xb3\x01\xf7\xc8U\xdbU\xf2\x84v\x9b\ \xec\xbc\x84Uo\xbec85_\x1f\xc7\x91\xbf\x0b\ \x81^T\xe1\x01\xd5fK\xdd\xfc;L\x82G\xec\xdb\ \xb3c\xa7P\x84\x1f\xcc\xa9\xdd\x8b_4\x99\x0158\ \xd3\xba\xd4\xda\x83\xd4\x8dw$Q\xa8w1\x95\xfc\xa5\ \xc3e\xb7w\x1a\xaf(q\x8d.|\xa2\xcdt\x08L\ \x5c\x81gLy\x0b|\xee\xb63\xa23>i\x16/\ 0\x8b\x14S\xdc\xaa\xb2\xa3&\xb1\xbb\x1f\xf10A_\ \x9a\x8b\x14\xaa\xc35\x8a\xb1\x80\x03|c8L\xe0\xce\ \xfc\x86\xce\x82o\x13\x97\xb8\xd1}q\xb7sD\x1f-\ w\x09Z\x90\xc0\x84\xc4\x8d\xf0]B\xdb\x19\xf8\xd5\xfd\ \x01\xeb\x7f\xf6f\xca\x93B\x96\x00\x00\x00\x00IEN\ D\xaeB`\x82\ \x00\x00\x07o\ \x89\ PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ \x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\ \x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ \x01\x00\x9a\x9c\x18\x00\x00\x05\xe8iTXtXML\ :com.adobe.xmp\x00\x00\ \x00\x00\x00<?xpacket beg\ in=\x22\xef\xbb\xbf\x22 id=\x22W5M\ 0MpCehiHzreSzNTc\ zkc9d\x22?> <x:xmpm\ eta xmlns:x=\x22ado\ be:ns:meta/\x22 x:x\ mptk=\x22Adobe XMP \ Core 5.6-c148 79\ .164036, 2019/08\ /13-01:06:57 \ \x22> <rdf:RDF \ xmlns:rdf=\x22http:\ //www.w3.org/199\ 9/02/22-rdf-synt\ ax-ns#\x22> <rdf:De\ scription rdf:ab\ out=\x22\x22 xmlns:xmp\ =\x22http://ns.adob\ e.com/xap/1.0/\x22 \ xmlns:dc=\x22http:/\ /purl.org/dc/ele\ ments/1.1/\x22 xmln\ s:photoshop=\x22htt\ p://ns.adobe.com\ /photoshop/1.0/\x22\ xmlns:xmpMM=\x22ht\ tp://ns.adobe.co\ m/xap/1.0/mm/\x22 x\ mlns:stEvt=\x22http\ ://ns.adobe.com/\ xap/1.0/sType/Re\ sourceEvent#\x22 xm\ p:CreatorTool=\x22A\ dobe Photoshop 2\ 1.0 (Windows)\x22 x\ mp:CreateDate=\x222\ 020-03-03T09:50:\ 37-03:00\x22 xmp:Mo\ difyDate=\x222020-0\ 5-02T17:58-03:00\ \x22 xmp:MetadataDa\ te=\x222020-05-02T1\ 7:58-03:00\x22 dc:f\ ormat=\x22image/png\ \x22 photoshop:Colo\ rMode=\x223\x22 photos\ hop:ICCProfile=\x22\ sRGB IEC61966-2.\ 1\x22 xmpMM:Instanc\ eID=\x22xmp.iid:a33\ 592e3-4c96-8942-\ a543-392bef8a7cb\ f\x22 xmpMM:Documen\ tID=\x22adobe:docid\ :photoshop:cb9cd\ 370-24ec-8e4c-9b\ b7-a0a9032e1c71\x22\ xmpMM:OriginalD\ ocumentID=\x22xmp.d\ id:517fc3d6-5ed2\ -6c40-86a1-15236\ 034be83\x22> <xmpMM\ :History> <rdf:S\ eq> <rdf:li stEv\ t:action=\x22create\ d\x22 stEvt:instanc\ eID=\x22xmp.iid:517\ fc3d6-5ed2-6c40-\ 86a1-15236034be8\ 3\x22 stEvt:when=\x222\ 020-03-03T09:50:\ 37-03:00\x22 stEvt:\ softwareAgent=\x22A\ dobe Photoshop 2\ 1.0 (Windows)\x22/>\ <rdf:li stEvt:a\ ction=\x22saved\x22 st\ Evt:instanceID=\x22\ xmp.iid:a33592e3\ -4c96-8942-a543-\ 392bef8a7cbf\x22 st\ Evt:when=\x222020-0\ 5-02T17:58-03:00\ \x22 stEvt:software\ Agent=\x22Adobe Pho\ toshop 21.0 (Win\ dows)\x22 stEvt:cha\ nged=\x22/\x22/> </rdf\ :Seq> </xmpMM:Hi\ story> </rdf:Des\ cription> </rdf:\ RDF> </x:xmpmeta\ > <?xpacket end=\ \x22r\x22?>fta\xae\x00\x00\x01-IDA\ T8\xcb\x85\xd3\xbf+\x87Q\x14\xc7\xf1\xeb\xfb\xa5d\ \xa3d\xb6\x180\x88I\x12V6e\xf0'\xd8\x0c\xb2\ \xca\xc6\xa0\xef`1\x99,\xca \x93b\x91\x92_}\ C\x92\xc5 \xf5\xca_`\xb6\x9c\xa7\xee\xf7\xe9y\x18\ n\xcfy\xce9\xf7s\xde\xf7\xdes\x12R\xb6\xba2\ {\x10s\x98\xaa\x89'\xa4\xaa\xcd\xbd8\xc0+n\xf1\ \x886V\xaaDRI\xb1\x1bW8\xc3h\xf8\xfa\xb0\ \x8a/\xac\x85\xafQ\x16(\x1c\xdb\xb8\xacA\x9e\xc6\x07\ \x86\xf3X\x99\xe0\x01\x8ba\xf7d\xfe\xc2>\xc5FF\ \xdb!0\x10\x02\xa3\x15\xd5\x0b\xc2=\xb4\xca\x02E\xe2\ \x10\xee1\xf2\x87\xc0.\xf6\xeb\x08\xfaq\x87\xb1?\x04\ 
ZA\xd1!\xd0\xc8.\xe9=\xde?\xa1\x19\xb1F\x91\ \x8cu\x5c\x94\x9f\xb1\xa8t\x82\xadr\xa3T\xac6\x96\ \x0a\xb2\xc2\xb9\x80\x1f,\x07\xc9tt\xe1|\xc4f0\ \x8b\x09\x1c\xe2\xa5\xdc\x07sx\xc2MT\xf8\x8e\xef}\ t\xe3\x07>q\x1dy\xc7q\xc4T\x87\xf9\x86\xc9\xec\ \xff\x08;U\xb9\xe5Yhf\xef\xfd\x1cd\x9bA0\ \x9e]nW\xdd,\xa4,i+\x1a\xeb<\xce\xdf1\ \x03\xff\x09\xe4B\xb5\xa3\x8c\xf4\x0bB\x91\xe0c(\x84\ j\xa4\x00\x00\x00\x00IEND\xaeB`\x82\ \x00\x00\x07k\ \x89\ PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ \x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\ \x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ \x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\ :com.adobe.xmp\x00\x00\ \x00\x00\x00<?xpacket beg\ in=\x22\xef\xbb\xbf\x22 id=\x22W5M\ 0MpCehiHzreSzNTc\ zkc9d\x22?> <x:xmpm\ eta xmlns:x=\x22ado\ be:ns:meta/\x22 x:x\ mptk=\x22Adobe XMP \ Core 5.6-c148 79\ .164036, 2019/08\ /13-01:06:57 \ \x22> <rdf:RDF \ xmlns:rdf=\x22http:\ //www.w3.org/199\ 9/02/22-rdf-synt\ ax-ns#\x22> <rdf:De\ scription rdf:ab\ out=\x22\x22 xmlns:xmp\ =\x22http://ns.adob\ e.com/xap/1.0/\x22 \ xmlns:dc=\x22http:/\ /purl.org/dc/ele\ ments/1.1/\x22 xmln\ s:photoshop=\x22htt\ p://ns.adobe.com\ /photoshop/1.0/\x22\ xmlns:xmpMM=\x22ht\ tp://ns.adobe.co\ m/xap/1.0/mm/\x22 x\ mlns:stEvt=\x22http\ ://ns.adobe.com/\ xap/1.0/sType/Re\ sourceEvent#\x22 xm\ p:CreatorTool=\x22A\ dobe Photoshop 2\ 1.0 (Windows)\x22 x\ mp:CreateDate=\x222\ 020-03-03T09:50:\ 40-03:00\x22 xmp:Mo\ difyDate=\x222020-0\ 5-02T17:58:58-03\ :00\x22 xmp:Metadat\ aDate=\x222020-05-0\ 2T17:58:58-03:00\ \x22 dc:format=\x22ima\ ge/png\x22 photosho\ p:ColorMode=\x223\x22 \ photoshop:ICCPro\ file=\x22sRGB IEC61\ 966-2.1\x22 xmpMM:I\ nstanceID=\x22xmp.i\ id:5d15c02f-5322\ -5f4d-b2ac-d2a72\ 9880a94\x22 xmpMM:D\ ocumentID=\x22adobe\ :docid:photoshop\ :98ca12ed-c366-c\ 24e-ac49-f70f5b7\ 98d09\x22 xmpMM:Ori\ ginalDocumentID=\ \x22xmp.did:20c9438\ c-2aea-6749-9401\ -a748428197e1\x22> \ <xmpMM:History> \ <rdf:Seq> <rdf:l\ i stEvt:action=\x22\ created\x22 stEvt:i\ nstanceID=\x22xmp.i\ id:20c9438c-2aea\ -6749-9401-a7484\ 28197e1\x22 stEvt:w\ hen=\x222020-03-03T\ 09:50:40-03:00\x22 \ stEvt:softwareAg\ ent=\x22Adobe Photo\ shop 21.0 (Windo\ ws)\x22/> <rdf:li s\ tEvt:action=\x22sav\ ed\x22 stEvt:instan\ ceID=\x22xmp.iid:5d\ 15c02f-5322-5f4d\ -b2ac-d2a729880a\ 94\x22 stEvt:when=\x22\ 2020-05-02T17:58\ :58-03:00\x22 stEvt\ :softwareAgent=\x22\ Adobe Photoshop \ 21.0 (Windows)\x22 \ stEvt:changed=\x22/\ \x22/> </rdf:Seq> <\ /xmpMM:History> \ </rdf:Descriptio\ n> </rdf:RDF> </\ x:xmpmeta> <?xpa\ cket end=\x22r\x22?>\x82\xce\ \xc3G\x00\x00\x01 IDAT8\xcb\x8d\xd2\xbf.\ DA\x14\xc7\xf1a\x17\x85B!\xab\x15\xadw\x90\x8d\ \xb0*\x15\x96V!\xdeA\xa7\x12\x1a\xa2$$*\x89\ x\x00\x1d\xa1\x13\x85\xc6\xbf\x0e\xcd'A\xeb\x014\xe7\ \xca\xb8\xb97\xab\x98\xcc\xcc\x99\xf9}\xe7\x9c\xdf\x99\x84\ \x84\xbe\x98\xcb\xa3.\x9e\xd0\x8f\x94\x07\xba\xb8\xc4\x1d\xae\ \xb1\x1a\xf1!l\xe36\xce\xce1Y@\x0a\xf1&^\ \xb1\x8ev\x88\x9fq\x8c+\x5c`\x1e\xd3\x01\x13\xf7R\ B\x07\xef\x18/\xa58\x82\x07\x1cT\xa4\xbf\x8a\x17\x0c\ '\x1c\x065a0jkf\xfbB\xd4D#\xf3\xe5\ \x06\xcb\x09gX\xcb\x8d\xa900_7b>\xc1F\ \xc2\x0eN\xb3WR/\xe7#\xb3'\xb4\x13&\xf0\x86\ \x95\x1e\x90<\x8b\xa30\xf6\xb7\x8d\xb3\xf8\xc0b\x0d$\ \x17\xef\xe3\x11\xa3\x05\xa0\xa8\xa9\x83/,\x95 \xb9x\ /\xda;V\xfe\x07\xcd\x0c\xf2\x99A\x062\xf1n\x88\ [\xb9\x99\xa9\xd4\xa6\x1c\xd2\xadx\xb9U.\xb1lT\ q0\x17\xe5,`+\x1coU\xf9S\xe5vqa\ \x06\xdf\xb8\xaf\x13\xd7\x01\xf2~OE\x9bk\xdb\xfb\x9f\ OS^\xff\x19?J`\xde\xc8\x9b\xcf\x02g\x00\x00\ \x00\x00IEND\xaeB`\x82\ \x00\x00\x07\x9f\ \x89\ 
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ \x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\ \x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ \x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\ :com.adobe.xmp\x00\x00\ \x00\x00\x00<?xpacket beg\ in=\x22\xef\xbb\xbf\x22 id=\x22W5M\ 0MpCehiHzreSzNTc\ zkc9d\x22?> <x:xmpm\ eta xmlns:x=\x22ado\ be:ns:meta/\x22 x:x\ mptk=\x22Adobe XMP \ Core 5.6-c148 79\ .164036, 2019/08\ /13-01:06:57 \ \x22> <rdf:RDF \ xmlns:rdf=\x22http:\ //www.w3.org/199\ 9/02/22-rdf-synt\ ax-ns#\x22> <rdf:De\ scription rdf:ab\ out=\x22\x22 xmlns:xmp\ =\x22http://ns.adob\ e.com/xap/1.0/\x22 \ xmlns:dc=\x22http:/\ /purl.org/dc/ele\ ments/1.1/\x22 xmln\ s:photoshop=\x22htt\ p://ns.adobe.com\ /photoshop/1.0/\x22\ xmlns:xmpMM=\x22ht\ tp://ns.adobe.co\ m/xap/1.0/mm/\x22 x\ mlns:stEvt=\x22http\ ://ns.adobe.com/\ xap/1.0/sType/Re\ sourceEvent#\x22 xm\ p:CreatorTool=\x22A\ dobe Photoshop 2\ 1.0 (Windows)\x22 x\ mp:CreateDate=\x222\ 020-03-03T09:50:\ 40-03:00\x22 xmp:Mo\ difyDate=\x222020-0\ 5-02T17:59:01-03\ :00\x22 xmp:Metadat\ aDate=\x222020-05-0\ 2T17:59:01-03:00\ \x22 dc:format=\x22ima\ ge/png\x22 photosho\ p:ColorMode=\x223\x22 \ photoshop:ICCPro\ file=\x22sRGB IEC61\ 966-2.1\x22 xmpMM:I\ nstanceID=\x22xmp.i\ id:be37167e-4151\ -b240-b56c-cc43b\ ece044f\x22 xmpMM:D\ ocumentID=\x22adobe\ :docid:photoshop\ :e34b6d77-9a4d-7\ 14d-8812-ad1f09b\ 86a4d\x22 xmpMM:Ori\ ginalDocumentID=\ \x22xmp.did:9e72c47\ 0-3934-bc4b-95df\ -3540129ad66f\x22> \ <xmpMM:History> \ <rdf:Seq> <rdf:l\ i stEvt:action=\x22\ created\x22 stEvt:i\ nstanceID=\x22xmp.i\ id:9e72c470-3934\ -bc4b-95df-35401\ 29ad66f\x22 stEvt:w\ hen=\x222020-03-03T\ 09:50:40-03:00\x22 \ stEvt:softwareAg\ ent=\x22Adobe Photo\ shop 21.0 (Windo\ ws)\x22/> <rdf:li s\ tEvt:action=\x22sav\ ed\x22 stEvt:instan\ ceID=\x22xmp.iid:be\ 37167e-4151-b240\ -b56c-cc43bece04\ 4f\x22 stEvt:when=\x22\ 2020-05-02T17:59\ :01-03:00\x22 stEvt\ :softwareAgent=\x22\ Adobe Photoshop \ 21.0 (Windows)\x22 \ stEvt:changed=\x22/\ \x22/> </rdf:Seq> <\ /xmpMM:History> \ </rdf:Descriptio\ n> </rdf:RDF> </\ x:xmpmeta> <?xpa\ cket end=\x22r\x22?>\x06\xd9\ \xb6\xfa\x00\x00\x01TIDAT8\x8d\x8d\xd2\xbbJ\ \x03A\x14\xc6\xf1h\x12\x83QAlm#\x1a\xa3\x85\ \x18/XY\xf8\x04>\x81\x85`\xa7\x90*\x22*^\ j\xb1Q4\xdec\xe9\x03\xd8\x09\x16\x8awAH\xc4\ 6\xeav\xb6\x166\xeb\xff\xc8\x198,\xbb\x98\xc0\x8f\ \xd9d2\xdf\x9c9\xb31\xdf\xf7c\x9e\xe7\xa5\xd0\x82\ \xd6:\xa5\x91\x90\xb5\xb2\xb8\x09\x17x\xc5=\x1e\xf1\x84\ g}\x0e\xba\xc1\x07\x96\x5c\x80\xa4U0\x85>\x8cb\ \x00\xbd\x18\xc6\x10\xf2:\x8e\xa1\x07\xc78t\x01R\xfa\ -\xb2\xf2\x83\x1e\xa9\x1d\x13\xee{\x10s\xf3(\xd9\x80\ ;\x8c\xe8d\x07.\xe1c\xd6,j\xc0\x8c>\x17\xb1\ o\x03\xe4lY\xc4\xf5\xec;\xe8\xc7'\x0a\xbah[\ Cw\xb1n+h\xc3\x15\xc6u\xe7\xb2\xd9UB\xde\ \xb4\xc2st\xe2L\x836\x5c@\xb3v\xfd\xcb\xa5\xea\ \xe2\xa4\x8e\xd2\xb4o,\x9a\xb9\x07\xa9\xd2\x05$\xf1.\ \x9d\xb5\xe7\xd51\xaec\x17jr\xf6\xb0\x1e\xa4\xb5\x07\ 9\xbb\xc8\x84\xb9\x10\xb9V\xf9Lc\x0e\x07\xc1[\x18\ \xb4\xbbG\x84\xe4P\xd5j6m\x80\xbc\x81y\xfdS\ \x02\x8d!\x5cO\xba\xf1\x83-\x1bp\x8dL\xd4\x8b\x13\ RQA\xae\xd3\xde\x824\xb1\x8c\x05\xacb%\xc2\x9a\ 6\xf0\x05'.@^\x9ee\x1caO\xba\xfb\x8f\x12\ N1\xf9\x17Po\xd9Q~\x01yB\x14\xderg\ \x85\x96\x00\x00\x00\x00IEND\xaeB`\x82\ \x00\x00\x07\xa1\ \x89\ PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ \x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\ \x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ \x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\ :com.adobe.xmp\x00\x00\ \x00\x00\x00<?xpacket beg\ in=\x22\xef\xbb\xbf\x22 id=\x22W5M\ 0MpCehiHzreSzNTc\ 
zkc9d\x22?> <x:xmpm\ eta xmlns:x=\x22ado\ be:ns:meta/\x22 x:x\ mptk=\x22Adobe XMP \ Core 5.6-c148 79\ .164036, 2019/08\ /13-01:06:57 \ \x22> <rdf:RDF \ xmlns:rdf=\x22http:\ //www.w3.org/199\ 9/02/22-rdf-synt\ ax-ns#\x22> <rdf:De\ scription rdf:ab\ out=\x22\x22 xmlns:xmp\ =\x22http://ns.adob\ e.com/xap/1.0/\x22 \ xmlns:dc=\x22http:/\ /purl.org/dc/ele\ ments/1.1/\x22 xmln\ s:photoshop=\x22htt\ p://ns.adobe.com\ /photoshop/1.0/\x22\ xmlns:xmpMM=\x22ht\ tp://ns.adobe.co\ m/xap/1.0/mm/\x22 x\ mlns:stEvt=\x22http\ ://ns.adobe.com/\ xap/1.0/sType/Re\ sourceEvent#\x22 xm\ p:CreatorTool=\x22A\ dobe Photoshop 2\ 1.0 (Windows)\x22 x\ mp:CreateDate=\x222\ 020-03-03T09:50:\ 34-03:00\x22 xmp:Mo\ difyDate=\x222020-0\ 5-02T17:57:41-03\ :00\x22 xmp:Metadat\ aDate=\x222020-05-0\ 2T17:57:41-03:00\ \x22 dc:format=\x22ima\ ge/png\x22 photosho\ p:ColorMode=\x223\x22 \ photoshop:ICCPro\ file=\x22sRGB IEC61\ 966-2.1\x22 xmpMM:I\ nstanceID=\x22xmp.i\ id:ef3af498-e399\ -0942-91aa-e641c\ f1f7e4e\x22 xmpMM:D\ ocumentID=\x22adobe\ :docid:photoshop\ :154cd6bb-bfcc-f\ 049-bc52-8169c48\ 8f52d\x22 xmpMM:Ori\ ginalDocumentID=\ \x22xmp.did:c41b965\ 7-fccb-7149-9d39\ -d4a090c527ce\x22> \ <xmpMM:History> \ <rdf:Seq> <rdf:l\ i stEvt:action=\x22\ created\x22 stEvt:i\ nstanceID=\x22xmp.i\ id:c41b9657-fccb\ -7149-9d39-d4a09\ 0c527ce\x22 stEvt:w\ hen=\x222020-03-03T\ 09:50:34-03:00\x22 \ stEvt:softwareAg\ ent=\x22Adobe Photo\ shop 21.0 (Windo\ ws)\x22/> <rdf:li s\ tEvt:action=\x22sav\ ed\x22 stEvt:instan\ ceID=\x22xmp.iid:ef\ 3af498-e399-0942\ -91aa-e641cf1f7e\ 4e\x22 stEvt:when=\x22\ 2020-05-02T17:57\ :41-03:00\x22 stEvt\ :softwareAgent=\x22\ Adobe Photoshop \ 21.0 (Windows)\x22 \ stEvt:changed=\x22/\ \x22/> </rdf:Seq> <\ /xmpMM:History> \ </rdf:Descriptio\ n> </rdf:RDF> </\ x:xmpmeta> <?xpa\ cket end=\x22r\x22?>\xd8\x9b\ \xbf\xde\x00\x00\x01VIDAT8\xcbu\xd3?(\ \x85Q\x18\xc7\xf1s\x8f\x7fe\xa0\xeeB\x06\xddI)\ \x8bA\x16\x8aR\xc4\xa0LF\x91\x01\x83?uW\x8b\ \x12)e!\xd1\x1d\x95A\xd7\xa0\x942\xa0(\x03\x19\ HQH\xbd2\xd8\x14\x99|O=G\x8f\xc7k\xf8\ \xd49\xef{\xcf\xef>\xefs\xceqI\x928%\x03\ \xaf\xe6\xbdhV\xf3R\xf3{\xa7'%j\xdc\x8a\x03\ |\xe0\x0d\x1b\xa8Q\xef}Z@P\x87\x02^0\x8d\ =,a\x05O\xc8K\x95\xb1Z\xa7K\x9f\xc2\x03\xb6\ P/\xcfw0,\xe3\x0e\x9c\xe1\x0a]q]\x0c\x98\ \xc4\x1dZLEEL\x98gcxF\x93\xae`\x13\ \xb32\xaeP\xfd\x08\x01\xe32.W\xe5\x9f\xa0_\x07\ \xacbQ\xc6e\xc2\xc9\xe7\x0c\xa9\xe0\x18p*;\xf4\ \x13\xb0\x86\x05\xb3\x1bY\xe9I!e\xa7B@\x9f\x0d\ \x98W\xa5\xe6\xa5\xeba\xf1>.0\xa0\x82\xfe\x04\xc4\ Oh\xc35\x8e\xd0\xae\x16\x8cJ\x93\x0f\xd1\x88c\x1b\ \x10\x16\x7f\xca\xe1\x19L;0\xa8\x92F_\xe2=n\ e|\xd9\x89/\xac\x9b\xc5\xde\x9c\xd0Z\xdc\xc8Y\xc8\ \xc6\x80\xd8\xd9\x1c\xb6\xf1({\xed\xcc\xbf/\xe3\x15s\ \xa8\xb4\x07I\x97\xda-e\x9e\xcb\xe9\x1b\xc1=v\xd1\ `.\xde\xaf\xbb\xe0M\xd0\x0cn\xa5a=f+3\ \xff]&[M\xb5\xea\x81\xd7\x0b\xa3o\x1be\xef\x1d\ ^\xd3\xf2\x8d\x00\x00\x00\x00IEND\xaeB`\x82\ \ \x00\x00\x07\xb0\ \x89\ PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ \x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\ \x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ \x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\ :com.adobe.xmp\x00\x00\ \x00\x00\x00<?xpacket beg\ in=\x22\xef\xbb\xbf\x22 id=\x22W5M\ 0MpCehiHzreSzNTc\ zkc9d\x22?> <x:xmpm\ eta xmlns:x=\x22ado\ be:ns:meta/\x22 x:x\ mptk=\x22Adobe XMP \ Core 5.6-c148 79\ .164036, 2019/08\ /13-01:06:57 \ \x22> <rdf:RDF \ xmlns:rdf=\x22http:\ //www.w3.org/199\ 9/02/22-rdf-synt\ ax-ns#\x22> <rdf:De\ scription rdf:ab\ out=\x22\x22 xmlns:xmp\ =\x22http://ns.adob\ e.com/xap/1.0/\x22 \ xmlns:dc=\x22http:/\ /purl.org/dc/ele\ 
[Embedded binary payload omitted: a run of 16x16 PNG icons (Adobe Photoshop 21.0 export; XMP history dated 2020-03-03, re-saved 2020-05-02) serialized as escaped byte strings in a compiled resource module. The raw bytes carry no further document content and are elided here.]
from __future__ import absolute_import from past.builtins import basestring import os import tempfile import stat import time import logging import errno import json import re from tqdm import tqdm from zipfile import ZipFile, BadZipfile import os.path as op import shutil from arcana.utils import JSON_ENCODING from arcana.utils import makedirs from arcana.data import Fileset, Field from arcana.repository.base import Repository from arcana.exceptions import ( ArcanaError, ArcanaUsageError, ArcanaFileFormatError, ArcanaWrongRepositoryError) from arcana.pipeline.provenance import Record from arcana.utils import dir_modtime, get_class_info, parse_value import xnat from .dataset import Dataset logger = logging.getLogger('arcana') special_char_re = re.compile(r'[^a-zA-Z_0-9]') tag_parse_re = re.compile(r'\((\d+),(\d+)\)') RELEVANT_DICOM_TAG_TYPES = set(('UI', 'CS', 'DA', 'TM', 'SH', 'LO', 'PN', 'ST', 'AS')) class XnatRepo(Repository): """ An 'Repository' class for XNAT repositories Parameters ---------- server : str (URI) URI of XNAT server to connect to project_id : str The ID of the project in the XNAT repository cache_dir : str (path) Path to local directory to cache remote data in user : str Username with which to connect to XNAT with password : str Password to connect to the XNAT repository with check_md5 : bool Whether to check the MD5 digest of cached files before using. This checks for updates on the server since the file was cached race_cond_delay : int The amount of time to wait before checking that the required fileset has been downloaded to cache by another process has completed if they are attempting to download the same fileset session_filter : str A regular expression that is used to prefilter the discovered sessions to avoid having to retrieve metadata for them, and potentially speeding up the initialisation of the Analysis. Note that if the processing relies on summary derivatives (i.e. of 'per_visit/subject/analysis' frequency) then the filter should match all sessions in the Analysis's subject_ids and visit_ids. 
""" type = 'xnat' SUMMARY_NAME = 'ALL' MD5_SUFFIX = '.__md5__.json' DERIVED_FROM_FIELD = '__derived_from__' PROV_SCAN = '__prov__' PROV_RESOURCE = 'PROV' depth = 2 def __init__(self, server, cache_dir, user=None, password=<PASSWORD>, check_md5=True, race_cond_delay=30, session_filter=None): super().__init__() if not isinstance(server, basestring): raise ArcanaUsageError( "Invalid server url {}".format(server)) self._server = server self._cache_dir = cache_dir makedirs(self._cache_dir, exist_ok=True) self._user = user self._password = password self._race_cond_delay = race_cond_delay self._check_md5 = check_md5 self._session_filter = session_filter self._login = None def __hash__(self): return (hash(self.server) ^ hash(self.cache_dir) ^ hash(self._race_cond_delay) ^ hash(self._check_md5)) def __repr__(self): return ("{}(server={}, cache_dir={})" .format(type(self).__name__, self.server, self._cache_dir)) def __eq__(self, other): try: return (self.server == other.server and self._cache_dir == other._cache_dir and self.cache_dir == other.cache_dir and self._race_cond_delay == other._race_cond_delay and self._check_md5 == other._check_md5) except AttributeError: return False # For comparison with other types def __getstate__(self): dct = self.__dict__.copy() del dct['_login'] del dct['_connection_depth'] return dct def __setstate__(self, state): self.__dict__.update(state) self._login = None self._connection_depth = 0 @property def prov(self): return { 'type': get_class_info(type(self)), 'server': self.server} @property def login(self): if self._login is None: raise ArcanaError("XNAT repository has been disconnected before " "exiting outer context") return self._login @property def server(self): return self._server @property def cache_dir(self): return self._cache_dir def dataset_cache_dir(self, dataset_name): return op.join(self.cache_dir, dataset_name) @property def check_md5(self): return self._check_md5 @property def session_filter(self): return (re.compile(self._session_filter) if self._session_filter is not None else None) def connect(self): """ Parameters ---------- prev_login : xnat.XNATSession An XNAT login that has been opened in the code that calls the method that calls login. It is wrapped in a NoExitWrapper so the returned connection can be used in a "with" statement in the method. """ sess_kwargs = {} if self._user is not None: sess_kwargs['user'] = self._user if self._password is not None: sess_kwargs['password'] = self._password self._login = xnat.connect(server=self._server, **sess_kwargs) def disconnect(self): self._login.disconnect() self._login = None def dataset(self, name, **kwargs): """ Returns a dataset from the XNAT repository Parameters ---------- name : str The name, path or ID of the dataset within the repository subject_ids : list[str] The list of subjects to include in the dataset visit_ids : list[str] The list of visits to include in the dataset """ return Dataset(name, repository=self, depth=2, **kwargs) def get_fileset(self, fileset): """ Caches a single fileset (if the 'path' attribute is accessed and it has not been previously cached for example Parameters ---------- fileset : Fileset The fileset to cache prev_login : xnat.XNATSession An XNATSession object to use for the connection. 
A new one is created if one isn't provided Returns ------- primary_path : str The path of the primary file once it has been cached aux_paths : dict[str, str] A dictionary containing a mapping of auxiliary file names to paths """ if fileset.format is None: raise ArcanaUsageError( "Attempting to download {}, which has not been assigned a " "file format (see Fileset.formatted)".format(fileset)) self._check_repository(fileset) with self: # Connect to the XNAT repository if haven't already xsession = self.get_xsession(fileset) xscan = xsession.scans[fileset.name] # Set URI so we can retrieve checksums if required fileset.uri = xscan.uri fileset.id = xscan.id cache_path = self._cache_path(fileset) need_to_download = True if op.exists(cache_path): if self._check_md5: md5_path = cache_path + XnatRepo.MD5_SUFFIX try: with open(md5_path, 'r') as f: cached_checksums = json.load(f) if cached_checksums == fileset.checksums: need_to_download = False except IOError: pass else: need_to_download = False if need_to_download: xresource = xscan.resources[fileset._resource_name] # The path to the directory which the files will be # downloaded to. tmp_dir = cache_path + '.download' try: # Attempt to make tmp download directory. This will # fail if another process (or previous attempt) has # already created it. In that case this process will # wait to see if that download finishes successfully, # and if so use the cached version. os.mkdir(tmp_dir) except OSError as e: if e.errno == errno.EEXIST: # Another process may be concurrently downloading # the same file to the cache. Wait for # 'race_cond_delay' seconds and then check that it # has been completed or assume interrupted and # redownload. self._delayed_download( tmp_dir, xresource, xscan, fileset, xsession.label, cache_path, delay=self._race_cond_delay) else: raise else: self.download_fileset( tmp_dir, xresource, xscan, fileset, xsession.label, cache_path) shutil.rmtree(tmp_dir) if not fileset.format.directory: (primary_path, aux_paths) = fileset.format.assort_files( op.join(cache_path, f) for f in os.listdir(cache_path)) else: primary_path = cache_path aux_paths = None return primary_path, aux_paths def get_field(self, field): self._check_repository(field) with self: xsession = self.get_xsession(field) val = xsession.fields[field.name] val = val.replace('&quot;', '"') val = parse_value(val) return val def put_fileset(self, fileset): if fileset.format is None: raise ArcanaFileFormatError( "Format of {} needs to be set before it is uploaded to {}" .format(fileset, self)) self._check_repository(fileset) # Open XNAT session with self: # Add session for derived scans if not present xsession = self.get_xsession(fileset) cache_path = self._cache_path(fileset) # Make session cache dir cache_path_dir = (op.dirname(cache_path) if fileset.format.directory else cache_path) if os.path.exists(cache_path_dir): shutil.rmtree(cache_path_dir) os.makedirs(cache_path_dir, stat.S_IRWXU | stat.S_IRWXG) if fileset.format.directory: shutil.copytree(fileset.path, cache_path) else: # Copy primary file shutil.copyfile(fileset.path, op.join(cache_path, fileset.fname)) # Copy auxiliaries for sc_fname, sc_path in fileset.aux_file_fnames_and_paths: shutil.copyfile(sc_path, op.join(cache_path, sc_fname)) with open(cache_path + XnatRepo.MD5_SUFFIX, 'w', **JSON_ENCODING) as f: json.dump(fileset.calculate_checksums(), f, indent=2) # Upload to XNAT xscan = self._login.classes.MrScanData( id=fileset.id, type=fileset.basename, parent=xsession) fileset.uri = xscan.uri # Select the first 
xnat_resource name to use to upload the data to resource_name = fileset.format.resource_names(self.type)[0] try: xresource = xscan.resources[resource_name] except KeyError: pass else: # Delete existing resource # TODO: probably should have check to see if we want to # override it xresource.delete() xresource = xscan.create_resource(resource_name) if fileset.format.directory: for fname in os.listdir(fileset.path): xresource.upload(op.join(fileset.path, fname), fname) else: xresource.upload(fileset.path, fileset.fname) for sc_fname, sc_path in fileset.aux_file_fnames_and_paths: xresource.upload(sc_path, sc_fname) def put_field(self, field): self._check_repository(field) val = field.value if field.array: if field.dtype is str: val = ['"{}"'.format(v) for v in val] val = '[' + ','.join(str(v) for v in val) + ']' if field.dtype is str: val = '"{}"'.format(val) with self: xsession = self.get_xsession(field) xsession.fields[field.name] = val def put_record(self, record, dataset): base_cache_path = self._cache_path( record, name=self.PROV_SCAN, dataset=dataset) if not op.exists(base_cache_path): os.mkdir(base_cache_path) else: if not op.isdir(base_cache_path): raise ArcanaError( "Base provenance cache path ('{}') should be a directory" .format(base_cache_path)) cache_path = op.join(base_cache_path, record.pipeline_name + '.json') record.save(cache_path) # TODO: Should also save digest of prov.json to check to see if it # has been altered remotely xsession = self.get_xsession(record, dataset=dataset) xprov = self._login.classes.MrScanData( id=self.PROV_SCAN, type=self.PROV_SCAN, parent=xsession) # Delete existing provenance if present try: xresource = xprov.resources[record.pipeline_name] except KeyError: pass else: xresource.delete() # FIXME: should reuse the same resource for all provenance jsons xresource = xprov.create_resource(record.pipeline_name) xresource.upload(cache_path, op.basename(cache_path)) def get_checksums(self, fileset): """ Downloads the MD5 digests associated
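# --- Editor's sketch (hypothetical helper, not arcana's or xnatpy's API) ---
# The get_fileset() logic above combines two patterns: (1) validate a local
# cache entry by comparing its stored MD5 digests against the server's, and
# (2) guard against concurrent downloads by treating a '<cache>.download'
# directory as a lock, waiting for the other process when mkdir() reports
# EEXIST (the class delegates that wait-and-retry to _delayed_download).
# The stdlib-only helper below distils the pattern; fetch_with_cache,
# download and remote_checksums are illustrative names, not real API.
import errno
import json
import os
import shutil
import time


def fetch_with_cache(cache_path, remote_checksums, download, delay=30, check_md5=True):
    """Reuse cache_path if its digests match, otherwise (re)download safely."""
    md5_path = cache_path + '.__md5__.json'
    if os.path.exists(cache_path):
        if not check_md5:
            return cache_path
        try:
            with open(md5_path) as f:
                if json.load(f) == remote_checksums:
                    return cache_path  # cached copy is current
        except (IOError, ValueError):
            pass  # missing or unreadable digest file -> re-download
    tmp_dir = cache_path + '.download'
    try:
        os.mkdir(tmp_dir)  # doubles as an inter-process lock
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
        # Another process appears to be downloading the same item; wait for
        # 'delay' seconds (cf. race_cond_delay above) and then re-check.
        time.sleep(delay)
        if os.path.exists(cache_path):
            return cache_path
        raise RuntimeError("concurrent download did not complete in time")
    try:
        download(tmp_dir)  # caller-supplied callable that fills tmp_dir
        if os.path.exists(cache_path):
            shutil.rmtree(cache_path)  # drop the stale cached copy
        os.rename(tmp_dir, cache_path)
        with open(md5_path, 'w') as f:
            json.dump(remote_checksums, f, indent=2)
    finally:
        if os.path.exists(tmp_dir):
            shutil.rmtree(tmp_dir)  # clean up after a failed download
    return cache_path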
to account for the wave transformation at the most remote point of the relaxation zone. Parameters ---------- Tstart : float Start time Tend : float End time x0 : numpy.ndarray Position vector of the time series fname : string Filename for timeseries file Lgen : Optional[numpy.ndarray] Length vector of relaxation zone Returns ---------- numpy.ndarray 2D numpy array Nx2 containing free-surface elevation in time. """ if sum(Lgen[:]*self.waveDir[:])< 0 : logEvent('ERROR! Wavetools.py: Location vector of generation zone should not be opposite to the wave direction') sys.exit(1) dt = old_div(self.Tp,50.) Tlag = np.zeros(len(self.omega),) for j in range(len(self.omega)): Tlag[j] = old_div(sum(self.kDir[j,:]*Lgen[:]),self.omega[j]) Tlag = max(Tlag) Tstart = Tstart - Tlag Np = int(old_div((Tend - Tstart),dt)) time = np.linspace(Tstart,Tend,Np ) etaR = np.zeros(len(time), ) for jj in range(len(time)): etaR[jj] = self.eta(x0,time[jj]) np.savetxt(fname,list(zip(time,etaR))) series = np.zeros((len(time),2),) series[:,0] = time series[:,1] = etaR return series class RandomWaves(object): """ This class is used for generating plane random waves using linear reconstruction of components from a wave spectrum Parameters ---------- Tp : float Peak wave period Hs : float Significant wave height mwl : float Still water level depth : float Water depth waveDir : numpy.ndarray Wave direction vector g : Numpy array Gravitational acceleration vector N : int Number of frequency components bandFactor : float Spectral band factor. fmax = bandFactor/Tp, fmin = 1/(bandFactor*Tp) spectName : string Name of spectral distribution spectral_params : dict Dictionary of arguments specific to the spectral distribution Example for JONSWAP = {"gamma": 3.3, "TMA":True,"depth": depth} TMA=True activates the TMA modification, which in turn needs the depth as a parameter phi : numpy.ndarray Component phases (if set to None, phases are picked at random) fast : bool Switch for optimised functions """ def __cinit__(self, Tp, Hs, mwl,#m significant wave height depth , #m depth waveDir, g, #peak frequency N, bandFactor, #accelerationof gravity spectName ,# random words will result in error and return the available spectra spectral_params = None, #JONPARAMS = {"gamma": 3.3, "TMA":True,"depth": depth} phi=None, fast = True ): self.fast= fast validSpectra = [JONSWAP,PM_mod] spec_fun =loadExistingFunction(spectName, validSpectra) self.g = np.array(g) waveDir = setDirVector(np.array(waveDir)) self.waveDir = waveDir self.vDir = setVertDir(g) dirCheck(self.waveDir,self.vDir) self.gAbs = sqrt(self.g[0]*self.g[0]+self.g[1]*self.g[1]+self.g[2]*self.g[2]) self.Hs = Hs self.depth = depth self.Tp = Tp self.fp = old_div(1.,Tp) self.bandFactor = bandFactor self.N = N self.mwl = mwl fmax = self.bandFactor*self.fp fmin = old_div(self.fp,self.bandFactor) self.df = old_div((fmax-fmin),float(self.N-1)) self.fi = np.linspace(fmin,fmax,self.N) self.omega = 2.*M_PI*self.fi self.ki = dispersion(self.omega,self.depth,g=self.gAbs) omega_p = 2.*M_PI/Tp self.wavelength = 2.*M_PI/dispersion(omega_p,self.depth,g=self.gAbs) if phi is None: self.phi = 2.0*M_PI*np.random.random(self.fi.shape[0]) logEvent('INFO Wavetools.py: No phase array is given. Assigning random phases. Outputing the phasing of the random waves') else: try: self.phi = np.array(phi) if self.phi.shape[0] != self.fi.shape[0]: logEvent('ERROR! Wavetools.py: Phase array must have N elements') sys.exit(1) except: logEvent('ERROR! 
Wavetools.py: phi argument must be an array with N elements') sys.exit(1) #ai = np.sqrt((Si_J[1:]+Si_J[:-1])*(fi[1:]-fi[:-1])) fim = reduceToIntervals(self.fi,self.df) self.fim = fim if (spectral_params is None): self.Si_Jm = spec_fun(fim,self.fp,self.Hs) else: try: self.Si_Jm = spec_fun(fim,self.fp,self.Hs,**spectral_params) except: logEvent('ERROR! Wavetools.py: Additional spectral parameters are not valid for the %s spectrum' %spectName) sys.exit(1) self.tanhF = np.zeros(N,"d") for ii in range(self.N): self.tanhF[ii] = float(np.tanh(self.ki[ii]*self.depth) ) self.ai = np.sqrt(2.*returnRectangles(self.Si_Jm,fim)) self.kDir = np.zeros((len(self.ki),3),) for ii in range(3): self.kDir[:,ii] = self.ki[:] * self.waveDir[ii] if(self.N > 10000): logEvent("ERROR! Wavetools.py: Maximum number of frequencies for Random Waves is 10000 ",level=0) #C++ declarations for ij in range(3): self.waveDir_c[ij] = self.waveDir[ij] self.vDir_c[ij] = self.vDir[ij] self.waveDir_ = self.waveDir_c self.vDir_ = self.vDir_c for ij in range(self.N): for kk in range(3): self.kDir_c[3*ij+kk] = self.kDir[ij,kk] self.omega_c[ij] = self.omega[ij] self.ki_c[ij] =self.ki[ij] self.tanh_c[ij] = self.tanhF[ij] self.ai_c[ij] = self.ai[ij] self.phi_c[ij] = self.phi[ij] self.kDir_ = self.kDir_c self.omega_ = self.omega_c self.ki_ =self.ki_c self.ai_ = self.ai_c self.tanh_ = self.tanh_c self.phi_ = self.phi_c def _cpp_eta(self, x, t): return __cpp_etaRandom(x,t,self.kDir_, self.omega_,self.phi_,self.ai_, self.N, self.fast) def eta(self, x, t): """Calculates free surface elevation (RandomWaves class) Parameters ---------- x : numpy.ndarray Position vector t : float Time variable Returns -------- float Free-surface elevation as a float """ cython.declare(xx=cython.double[3]) xx[0] = x[0] xx[1] = x[1] xx[2] = x[2] return self._cpp_eta(xx,t) def _cpp_u(self, U, x, t): __cpp_uRandom(U, x,t,self.kDir_, self.ki_, self.omega_,self.phi_,self.ai_,self.mwl,self.depth, self.N, self.waveDir_, self.vDir_, self.tanh_, self.gAbs, self.fast) def u(self, x, t): """Calculates wave velocity vector (RandomWaves class) Parameters ---------- x : numpy.ndarray Position vector t : float Time variable Returns -------- numpy.ndarray Velocity vector as 1D array """ cython.declare(xx=cython.double[3]) cython.declare(cppU=cython.double[3]) for ii in range(3): xx[ii] = x[ii] cppU[ii] = 0. U = np.zeros(3,) self._cpp_u(cppU,xx,t) U[0] = cppU[0] U[1] = cppU[1] U[2] = cppU[2] return U def writeEtaSeries(self,Tstart,Tend,x0,fname,Lgen= np.array([0.,0,0])): """Writes a timeseries of the free-surface elevation It also returns the free surface elevation as a time-eta array. If Lgen !=[0.,0.,0.,] then Tstart is modified to account for the wave transformation at the most remote point of the relaxation zone. Parameters ---------- Tstart : float Start time Tend : float End time x0 : numpy.ndarray Position vector of the time series fname : string Filename for timeseries file Lgen : Optional[numpy.ndarray] Length vector of relaxation zone Returns ---------- numpy.ndarray 2D numpy array Nx2 containing free-surface elevation in time. """ if sum(Lgen[:]*self.waveDir[:])< 0 : logEvent('ERROR! Wavetools.py: Location vector of generation zone should not be opposite to the wave direction') sys.exit(1) dt = old_div(self.Tp,50.) 
Tlag = np.zeros(len(self.omega),) for j in range(len(self.omega)): Tlag[j] = old_div(sum(self.kDir[j,:]*Lgen[:]),self.omega[j]) Tlag = max(Tlag) Tstart = Tstart - Tlag Np = int(old_div((Tend - Tstart),dt)) time = np.linspace(Tstart,Tend,Np ) etaR = np.zeros(len(time), ) for jj in range(len(time)): etaR[jj] = self.eta(x0,time[jj]) np.savetxt(fname,list(zip(time,etaR))) series = np.zeros((len(time),2),) series[:,0] = time series[:,1] = etaR return series class MultiSpectraRandomWaves(object): """This class is used for generating random waves by combining multiple spectra with different distributions and directions Parameters ---------- Nspectra : int Total number of spectra Tp : list List of peak wave periods Hs : list List of significant wave heights mwl : float Still water level depth : float Water depth waveDir : list List of wave direction vector g : Numpy array Gravitational acceleration vector N : list List of numbers of frequency components bandFactor : list List of spectral band factors spectName : list List of names of spectral distribution spectral_params : list List of names of spectral distribution (see RandomWaves class) phi : list List of component phases fast : bool Switch for optimised functions """ def __cinit__(self, Nspectra, Tp, # np array with Hs, mwl,#m significant wave height depth , #m depth waveDir, g, #peak frequency N, bandFactor, #accelerationof gravity spectName ,# random words will result in error and return the available spectra spectral_params, #JONPARAMS = {"gamma": 3.3, "TMA":True,"depth": depth} phi, fast=True ): # Checking length of arrays / lists to be equal to NSpectra self.fast = fast try: if (len(Tp) != Nspectra) or (len(Hs) != Nspectra) or (len(waveDir) != Nspectra) or \ (len(N) != Nspectra) or (len(bandFactor) != Nspectra) or \ (len(spectName) != Nspectra) or (len(spectral_params) != Nspectra) or(len(phi) != Nspectra): logEvent('ERROR! Wavetools.py: Parameters passed in MultiSpectraRandomWaves must be in array or list form with length Nspectra ') sys.exit(1) except: logEvent('ERROR! Wavetools.py: Parameters passed in MultiSpectraRandomWaves must be in array or list form with length Nspectra ') sys.exit(1) # Initialize numpy arrays for complete reconstruction self.Nall = 0 self.mwl = mwl self.depth = depth self.g = np.array(g) self.vDir = setVertDir(g) self.gAbs = sqrt(g[0]*g[0]+g[1]*g[1]+g[2]*g[2]) for nn in N: self.Nall+=nn if(self.Nall > 10000): logEvent("ERROR! Wavetools.py: Maximum (number of frequencies) x (No of spectra) for MultispectraRandomWaves is 10000 ",level=0) self.tanhFM = np.zeros(self.Nall,"d") self.omegaM = np.zeros(self.Nall,"d") self.kiM = np.zeros(self.Nall,"d") self.aiM = np.zeros(self.Nall,"d") self.kDirM = np.zeros((self.Nall,3),"d") self.phiM= np.zeros(self.Nall,"d") self.waveDir = np.zeros((self.Nall,3),"d") NN = 0 for kk in range(Nspectra): logEvent("INFO Wavetools.py: Reading spectra No %s" %kk) NN1 = NN NN +=N[kk] RW = RandomWaves( Tp[kk], # np array with Hs[kk], mwl,#m significant wave height depth, #m depth waveDir[kk], g, #peak frequency N[kk], bandFactor[kk], #accelerationof gravity spectName[kk],# random words will result in error and return the available spectra spectral_params[kk], #JONPARAMS = {"gamma": 3.3, "TMA":True,"depth": depth} phi[kk], self.fast )
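# --- Illustrative sketch (not part of proteus) ---------------------------------
# The RandomWaves class above reconstructs the free surface as a linear
# superposition of N cosine components: amplitudes ai = sqrt(2*S(fi)*df) taken
# from the spectrum, wavenumbers from the dispersion relation, and random phases.
# The minimal NumPy-only sketch below shows that idea in isolation; the spectrum
# shape, helper names (dispersion_k, jonswap_like, eta_series) and defaults are
# assumptions for illustration, not the module's own API.
import numpy as np

def dispersion_k(omega, depth, g=9.81, iters=50):
    # Solve omega**2 = g*k*tanh(k*depth) for k by fixed-point iteration,
    # starting from the deep-water guess k = omega**2/g.
    k = omega**2 / g
    for _ in range(iters):
        k = omega**2 / (g * np.tanh(k * depth))
    return k

def jonswap_like(f, fp, Hs, gamma=3.3):
    # JONSWAP-shaped spectrum, rescaled so that Hs = 4*sqrt(m0).
    sigma = np.where(f <= fp, 0.07, 0.09)
    peak = np.exp(-((f - fp) ** 2) / (2.0 * sigma**2 * fp**2))
    S = f**-5 * np.exp(-1.25 * (fp / f) ** 4) * gamma**peak
    m0 = np.trapz(S, f)
    return S * (Hs / 4.0) ** 2 / m0

def eta_series(Tp, Hs, depth, x, t, N=128, bandFactor=2.0, seed=0):
    # eta(x, t) = sum_i ai*cos(ki*x - omega_i*t + phi_i) for unidirectional waves.
    rng = np.random.default_rng(seed)
    fp = 1.0 / Tp
    f = np.linspace(fp / bandFactor, fp * bandFactor, N)
    df = f[1] - f[0]
    ai = np.sqrt(2.0 * jonswap_like(f, fp, Hs) * df)
    omega = 2.0 * np.pi * f
    ki = dispersion_k(omega, depth)
    phi = rng.uniform(0.0, 2.0 * np.pi, N)
    phase = ki[None, :] * x - omega[None, :] * t[:, None] + phi[None, :]
    return np.sum(ai[None, :] * np.cos(phase), axis=1)

# Example: a 200 s elevation record at x = 0 for Tp = 8 s, Hs = 2 m, 20 m depth.
t = np.linspace(0.0, 200.0, 4001)
eta = eta_series(Tp=8.0, Hs=2.0, depth=20.0, x=0.0, t=t)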
metadata does not have field 'other_properties'.", "Collection 'foobar' metadata does not have field 'properties'.", } def test_normalize_collection_metadata_minimal_100(self, caplog): assert _normalize_collection_metadata({"id": "foobar"}, api_version=ComparableVersion("1.0.0")) == { 'id': 'foobar', 'stac_version': '0.9.0', 'stac_extensions': ['datacube'], 'description': 'foobar', 'extent': {'spatial': {'bbox': [[0, 0, 0, 0]]}, 'temporal': {'interval': [[None, None]]}}, 'license': 'proprietary', 'links': [], } warnings = set(r.getMessage() for r in caplog.records if r.levelno == logging.WARN) assert warnings == {"Collection 'foobar' metadata does not have field 'extent'."} def test_normalize_collection_metadata_minimal_full_100(self, caplog): assert _normalize_collection_metadata({"id": "foobar"}, api_version=ComparableVersion("1.0.0"), full=True) == { 'id': 'foobar', 'stac_version': '0.9.0', 'stac_extensions': ['datacube'], 'description': 'foobar', 'extent': {'spatial': {'bbox': [[0, 0, 0, 0]]}, 'temporal': {'interval': [[None, None]]}}, 'license': 'proprietary', 'cube:dimensions': {}, 'summaries': {}, 'links': [], } warnings = set(r.getMessage() for r in caplog.records if r.levelno == logging.WARN) assert warnings == { "Collection 'foobar' metadata does not have field 'cube:dimensions'.", "Collection 'foobar' metadata does not have field 'extent'.", "Collection 'foobar' metadata does not have field 'summaries'." } def test_normalize_collection_metadata_cube_dimensions_extent_full_100(self, caplog): metadata = { "id": "foobar", "extent": { "spatial": {"bbox": [[-180, -56, 180, 83]]}, "temporal": {"interval": [["2015-07-06", None]]} }, "cube:dimensions": { "x": {"type": "spatial", "axis": "x"}, "y": {"type": "spatial", "axis": "y"}, "t": {"type": "temporal"}, }, } assert _normalize_collection_metadata(metadata, api_version=ComparableVersion("1.0.0"), full=True) == { 'id': 'foobar', 'stac_version': '0.9.0', 'stac_extensions': ['datacube'], 'description': 'foobar', 'extent': { 'spatial': {'bbox': [[-180, -56, 180, 83]]}, 'temporal': {'interval': [["2015-07-06T00:00:00Z", None]]} }, 'license': 'proprietary', "cube:dimensions": { "x": {"type": "spatial", "axis": "x", "extent": [-180, 180]}, "y": {"type": "spatial", "axis": "y", "extent": [-56, 83]}, "t": {"type": "temporal", "extent": ["2015-07-06T00:00:00Z", None]}, }, 'summaries': {}, 'links': [], } def test_normalize_collection_metadata_dimensions_and_bands_040(self, caplog): metadata = { "id": "foobar", "cube:dimensions": { "x": {"type": "spatial"}, "b": {"type": "bands", "values": ["B02", "B03"]} }, "summaries": { "eo:bands": [{"name": "B02"}, {"name": "B03"}] } } res = _normalize_collection_metadata(metadata, api_version=ComparableVersion("0.4.0"), full=True) assert res["properties"]["cube:dimensions"] == { "x": {"type": "spatial"}, "b": {"type": "bands", "values": ["B02", "B03"]} } assert res["properties"]["eo:bands"] == [{"name": "B02"}, {"name": "B03"}] def test_normalize_collection_metadata_dimensions_and_bands_100(self, caplog): metadata = { "id": "foobar", "properties": { "cube:dimensions": { "x": {"type": "spatial"}, "b": {"type": "bands", "values": ["B02", "B03"]} }, "eo:bands": [{"name": "B02"}, {"name": "B03"}] } } res = _normalize_collection_metadata(metadata, api_version=ComparableVersion("1.0.0"), full=True) assert res["cube:dimensions"] == { "x": {"type": "spatial"}, "b": {"type": "bands", "values": ["B02", "B03"]} } assert res["summaries"]["eo:bands"] == [{"name": "B02"}, {"name": "B03"}] def 
test_normalize_collection_metadata_datetime(self, caplog): metadata = { "id": "foobar", "extent": { "temporal": { "interval": [["2009-08-07", "2009-10-11"], ["2011-12-13 14:15:16", None]], } } } res = _normalize_collection_metadata(metadata, api_version=ComparableVersion("1.0.0"), full=True) assert res["extent"]["temporal"]["interval"] == [ ['2009-08-07T00:00:00Z', '2009-10-11T00:00:00Z'], ['2011-12-13T14:15:16Z', None], ] def test_collections(self, api): resp = api.get('/collections').assert_status_code(200).json assert "links" in resp assert "collections" in resp assert 'S2_FAPAR_CLOUDCOVER' in [c['id'] for c in resp['collections']] assert 'S2_FOOBAR' in [c['id'] for c in resp['collections']] for collection in resp['collections']: assert 'id' in collection assert 'stac_version' in collection assert 'description' in collection assert 'license' in collection assert 'extent' in collection assert 'links' in collection def test_strip_private_fields(self, api): assert '_private' in dummy_backend.DummyCatalog().get_collection_metadata("S2_FOOBAR") # All metadata collections = api.get('/collections').assert_status_code(200).json["collections"] metadata, = (c for c in collections if c["id"] == "S2_FOOBAR") assert '_private' not in metadata # Single collection metadata metadata = api.get('/collections/S2_FOOBAR').assert_status_code(200).json assert '_private' not in metadata def test_collections_detail_invalid_collection(self, api): error = api.get('/collections/FOOBOO').assert_error(404, "CollectionNotFound").json assert error["message"] == "Collection 'FOOBOO' does not exist." def test_collections_detail(self, api): collection = api.get('/collections/S2_FOOBAR').assert_status_code(200).json assert collection['id'] == 'S2_FOOBAR' assert collection['description'] == 'S2_FOOBAR' assert collection['license'] == 'free' cube_dimensions = { 'x': {'extent': [2.5, 6.2], 'reference_system': 'AUTO:42001', 'step': 10, 'type': 'spatial'}, 'y': {'extent': [49.5, 51.5], 'reference_system': 'AUTO:42001', 'step': 10, 'type': 'spatial'}, "t": {"type": "temporal", "extent": ["2019-01-01", None]}, "bands": {"type": "bands", "values": ["B02", "B03", "B04", "B08"]} } eo_bands = [ {"name": "B02", "common_name": "blue"}, {"name": "B03", "common_name": "green"}, {"name": "B04", "common_name": "red"}, {"name": "B08", "common_name": "nir"}, ] if api.api_version_compare.at_least("1.0.0"): assert collection['stac_version'] == '0.9.0' assert collection['cube:dimensions'] == cube_dimensions assert collection['summaries']['eo:bands'] == eo_bands assert collection['extent']['spatial'] == {'bbox': [[2.5, 49.5, 6.2, 51.5]]} assert collection['extent']['temporal'] == {'interval': [['2019-01-01T00:00:00Z', None]]} else: assert collection['stac_version'] == '0.6.2' assert collection['properties']['cube:dimensions'] == cube_dimensions assert collection['properties']["eo:bands"] == eo_bands assert collection['extent'] == { 'spatial': [2.5, 49.5, 6.2, 51.5], 'temporal': ['2019-01-01T00:00:00Z', None] } class TestBatchJobs: AUTH_HEADER = TEST_USER_AUTH_HEADER @staticmethod @contextmanager def _fresh_job_registry(next_job_id): """Set up a fresh job registry and predefine next job id""" with mock.patch.object(dummy_backend.DummyBatchJobs, 'generate_job_id', return_value=next_job_id): dummy_backend.DummyBatchJobs._job_registry = { (TEST_USER, '07024ee9-7847-4b8a-b260-6c879a2b3cdc'): BatchJobMetadata( id='07024ee9-7847-4b8a-b260-6c879a2b3cdc', status='running', process={'process_graph': {'foo': {'process_id': 'foo', 'arguments': {}}}}, 
created=datetime(2017, 1, 1, 9, 32, 12), ), (TEST_USER, '53c71345-09b4-46b4-b6b0-03fd6fe1f199'): BatchJobMetadata( id='53c71345-09b4-46b4-b6b0-03fd6fe1f199', status='finished', process={'process_graph': {'foo': {'process_id': 'foo', 'arguments': {}}}}, created=datetime(2020, 6, 11, 11, 51, 29), started=datetime(2020, 6, 11, 11, 55, 9), finished=datetime(2020, 6, 11, 11, 55, 15), memory_time_megabyte=timedelta(seconds=18704944), cpu_time=timedelta(seconds=1621), geometry={ "type": "Polygon", "coordinates": [[[-180, -90], [180, -90], [180, 90], [-180, 90], [-180, -90]]] }, bbox=[-180, -90, 180, 90], start_datetime=datetime(1981, 4, 24, 3, 0, 0), end_datetime=datetime(1981, 4, 24, 3, 0, 0), instruments=['MSI'], epsg=4326 ) } yield def test_create_job_040(self, api040): with self._fresh_job_registry(next_job_id="job-220"): resp = api040.post('/jobs', headers=self.AUTH_HEADER, json={ 'title': 'foo job', 'process_graph': {"foo": {"process_id": "foo", "arguments": {}}}, }).assert_status_code(201) assert resp.headers['Location'] == 'http://oeo.net/openeo/0.4.0/jobs/job-220' assert resp.headers['OpenEO-Identifier'] == 'job-220' job_info = dummy_backend.DummyBatchJobs._job_registry[TEST_USER, 'job-220'] assert job_info.id == "job-220" assert job_info.process == {"process_graph": {"foo": {"process_id": "foo", "arguments": {}}}} assert job_info.status == "created" assert job_info.created == dummy_backend.DEFAULT_DATETIME assert job_info.job_options is None def test_create_job_with_options_040(self, api040): with self._fresh_job_registry(next_job_id="job-230"): resp = api040.post('/jobs', headers=self.AUTH_HEADER, json={ 'title': 'foo job', 'process_graph': {"foo": {"process_id": "foo", "arguments": {}}}, 'job_options': {"driver-memory": "3g", "executor-memory": "5g"}, }).assert_status_code(201) assert resp.headers['Location'] == 'http://oeo.net/openeo/0.4.0/jobs/job-230' assert resp.headers['OpenEO-Identifier'] == 'job-230' job_info = dummy_backend.DummyBatchJobs._job_registry[TEST_USER, 'job-230'] assert job_info.job_options == {"driver-memory": "3g", "executor-memory": "5g"} def test_create_job_100(self, api100): with self._fresh_job_registry(next_job_id="job-245"): resp = api100.post('/jobs', headers=self.AUTH_HEADER, json={ 'process': { 'process_graph': {"foo": {"process_id": "foo", "arguments": {}}}, 'summary': 'my foo job', }, "title": "Foo job", "description": "Run the `foo` process!" }).assert_status_code(201) assert resp.headers['Location'] == 'http://oeo.net/openeo/1.0.0/jobs/job-245' assert resp.headers['OpenEO-Identifier'] == 'job-245' job_info = dummy_backend.DummyBatchJobs._job_registry[TEST_USER, 'job-245'] assert job_info.id == "job-245" assert job_info.process == {"process_graph": {"foo": {"process_id": "foo", "arguments": {}}}} assert job_info.status == "created" assert job_info.created == dummy_backend.DEFAULT_DATETIME assert job_info.job_options is None assert job_info.title == "Foo job" assert job_info.description == "Run the `foo` process!" 
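# --- Illustrative helper (not part of the test suite) --------------------------
# The job-creation tests above exercise the payload-shape difference between API
# versions: 0.4.0 expects "process_graph" at the top level of the POST body,
# while 1.0.0 nests it under "process". A client-side sketch of building either
# payload (the helper name and defaults are assumptions):
def build_create_job_payload(process_graph, api_version, title=None, job_options=None):
    if api_version.startswith("0.4"):
        payload = {"process_graph": process_graph}                # 0.4.x: flat graph
    else:
        payload = {"process": {"process_graph": process_graph}}  # 1.0.x: nested
    if title is not None:
        payload["title"] = title
    if job_options is not None:
        payload["job_options"] = job_options                     # backend-specific options
    return payload

# build_create_job_payload({"foo": {"process_id": "foo", "arguments": {}}}, "1.0.0",
#                          title="Foo job")
# -> {"process": {"process_graph": {...}}, "title": "Foo job"}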
def test_create_job_100_with_options(self, api100): with self._fresh_job_registry(next_job_id="job-256"): resp = api100.post('/jobs', headers=self.AUTH_HEADER, json={ 'process': { 'process_graph': {"foo": {"process_id": "foo", "arguments": {}}}, 'summary': 'my foo job', }, 'job_options': {"driver-memory": "3g", "executor-memory": "5g"}, }).assert_status_code(201) assert resp.headers['Location'] == 'http://oeo.net/openeo/1.0.0/jobs/job-256' assert resp.headers['OpenEO-Identifier'] == 'job-256' job_info = dummy_backend.DummyBatchJobs._job_registry[TEST_USER, 'job-256'] assert job_info.job_options == {"driver-memory": "3g", "executor-memory": "5g"} def test_start_job(self, api): with self._fresh_job_registry(next_job_id="job-267"): api.post('/jobs', headers=self.AUTH_HEADER, json=api.get_process_graph_dict( {"foo": {"process_id": "foo", "arguments": {}}}, )).assert_status_code(201) assert dummy_backend.DummyBatchJobs._job_registry[TEST_USER, 'job-267'].status == "created" api.post('/jobs/job-267/results', headers=self.AUTH_HEADER, json={}).assert_status_code(202) assert dummy_backend.DummyBatchJobs._job_registry[TEST_USER, 'job-267'].status == "running" def test_start_job_invalid(self, api): resp = api.post('/jobs/deadbeef-f00/results', headers=self.AUTH_HEADER) resp.assert_error(404, "JobNotFound") assert resp.json["message"] == "The batch job 'deadbeef-f00' does not exist." def test_get_job_info_040(self, api040): resp = api040.get('/jobs/07024ee9-7847-4b8a-b260-6c879a2b3cdc', headers=self.AUTH_HEADER) assert resp.assert_status_code(200).json == { 'id': '07024ee9-7847-4b8a-b260-6c879a2b3cdc', 'status': 'running', 'submitted': "2017-01-01T09:32:12Z", 'process_graph': {'foo': {'process_id': 'foo', 'arguments': {}}}, } def test_get_job_info_metrics_100(self, api100): resp = api100.get('/jobs/53c71345-09b4-46b4-b6b0-03fd6fe1f199', headers=self.AUTH_HEADER) assert resp.assert_status_code(200).json == { 'id': '53c71345-09b4-46b4-b6b0-03fd6fe1f199', 'status': 'finished', 'created': "2020-06-11T11:51:29Z", 'process': {'process_graph': {'foo': {'process_id': 'foo', 'arguments': {}}}}, 'duration_seconds': 6, 'duration_human_readable': "0:00:06", 'memory_time_megabyte_seconds': 18704944, 'memory_time_human_readable': "18704944 MB-seconds", 'cpu_time_seconds': 1621, 'cpu_time_human_readable': "1621 cpu-seconds" } def test_get_job_info_100(self, api100): resp = api100.get('/jobs/07024ee9-7847-4b8a-b260-6c879a2b3cdc', headers=self.AUTH_HEADER) assert resp.assert_status_code(200).json == { 'id': '07024ee9-7847-4b8a-b260-6c879a2b3cdc', 'status': 'running', 'created': "2017-01-01T09:32:12Z", 'process': {'process_graph': {'foo': {'process_id': 'foo', 'arguments': {}}}}, } def test_get_job_info_invalid(self, api): resp = api.get('/jobs/deadbeef-f00', headers=self.AUTH_HEADER).assert_error(404, "JobNotFound") assert resp.json["message"] == "The batch job 'deadbeef-f00' does not exist." 
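# --- Hypothetical sketch, not the backend implementation -----------------------
# test_get_job_info_metrics_100 above expects usage metrics derived from the job
# metadata (started/finished timestamps, memory_time and cpu_time timedeltas).
# One way those response fields could be computed; the helper name and the exact
# rounding are assumptions:
from datetime import datetime, timedelta

def job_usage_metrics(started, finished, memory_time, cpu_time):
    duration = finished - started
    return {
        "duration_seconds": int(duration.total_seconds()),
        "duration_human_readable": str(duration),  # e.g. "0:00:06"
        "memory_time_megabyte_seconds": int(memory_time.total_seconds()),
        "memory_time_human_readable": "{} MB-seconds".format(int(memory_time.total_seconds())),
        "cpu_time_seconds": int(cpu_time.total_seconds()),
        "cpu_time_human_readable": "{} cpu-seconds".format(int(cpu_time.total_seconds())),
    }

# With the fixture values this reproduces the asserted metrics:
metrics = job_usage_metrics(
    started=datetime(2020, 6, 11, 11, 55, 9),
    finished=datetime(2020, 6, 11, 11, 55, 15),
    memory_time=timedelta(seconds=18704944),
    cpu_time=timedelta(seconds=1621),
)
# metrics["duration_seconds"] == 6 and metrics["duration_human_readable"] == "0:00:06"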
def test_list_user_jobs_040(self, api040): with self._fresh_job_registry(next_job_id="job-318"): resp = api040.get('/jobs', headers=self.AUTH_HEADER) assert resp.assert_status_code(200).json == { "jobs": [ { 'id': '07024ee9-7847-4b8a-b260-6c879a2b3cdc', 'status': 'running', 'submitted': "2017-01-01T09:32:12Z", }, { 'id': '53c71345-09b4-46b4-b6b0-03fd6fe1f199', 'status': 'finished', 'submitted': "2020-06-11T11:51:29Z" } ], "links": [] } def test_list_user_jobs_100(self, api100): with self._fresh_job_registry(next_job_id="job-332"): resp = api100.get('/jobs', headers=self.AUTH_HEADER) assert resp.assert_status_code(200).json == { "jobs": [ { 'id': '07024ee9-7847-4b8a-b260-6c879a2b3cdc', 'status': 'running', 'created': "2017-01-01T09:32:12Z", }, { 'id': '53c71345-09b4-46b4-b6b0-03fd6fe1f199', 'status': 'finished', 'created': "2020-06-11T11:51:29Z" } ], "links": [] } def test_get_job_results_unfinished(self, api): with self._fresh_job_registry(next_job_id="job-345"): resp = api.get('/jobs/07024ee9-7847-4b8a-b260-6c879a2b3cdc/results', headers=self.AUTH_HEADER) resp.assert_error(400, "JobNotFinished") def test_get_job_results_040(self, api040): with self._fresh_job_registry(next_job_id="job-349"): dummy_backend.DummyBatchJobs._update_status( job_id="07024ee9-7847-4b8a-b260-6c879a2b3cdc", user_id=TEST_USER, status="finished") resp = api040.get('/jobs/07024ee9-7847-4b8a-b260-6c879a2b3cdc/results', headers=self.AUTH_HEADER) assert resp.assert_status_code(200).json == { "links": [ { "href": "http://oeo.net/openeo/0.4.0/jobs/07024ee9-7847-4b8a-b260-6c879a2b3cdc/results/output.tiff" } ] } def test_get_job_results_100(self, api100): with self._fresh_job_registry(next_job_id="job-362"): dummy_backend.DummyBatchJobs._update_status( job_id="07024ee9-7847-4b8a-b260-6c879a2b3cdc", user_id=TEST_USER, status="finished") resp = api100.get('/jobs/07024ee9-7847-4b8a-b260-6c879a2b3cdc/results', headers=self.AUTH_HEADER) assert resp.assert_status_code(200).json == { 'assets': { 'output.tiff': { 'roles': ['data'], 'title': 'output.tiff', 'href': 'http://oeo.net/openeo/1.0.0/jobs/07024ee9-7847-4b8a-b260-6c879a2b3cdc/results/output.tiff', 'type': 'image/tiff; application=geotiff', 'eo:bands': [{ 'name': "NDVI", 'center_wavelength': 1.23 }], 'file:nodata':[123] } }, 'geometry': None, 'id': '07024ee9-7847-4b8a-b260-6c879a2b3cdc', 'links': [ {
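# --- Illustrative sketch, not the dummy backend's code -------------------------
# The result-listing tests above show the response-shape difference between API
# versions: 0.4.0 returns plain download "links", while 1.0.0 returns a
# STAC-like "assets" mapping keyed by filename. A small sketch of producing
# either shape (function name, base URL handling and media type are assumptions):
def format_job_results(job_id, filenames, api_version, base_url="http://oeo.net/openeo"):
    version = "0.4.0" if api_version.startswith("0.4") else "1.0.0"
    hrefs = {name: "{}/{}/jobs/{}/results/{}".format(base_url, version, job_id, name)
             for name in filenames}
    if version == "0.4.0":
        return {"links": [{"href": href} for href in hrefs.values()]}
    return {
        "assets": {
            name: {"href": href, "roles": ["data"], "title": name,
                   "type": "image/tiff; application=geotiff"}
            for name, href in hrefs.items()
        },
        "links": [],
    }

# format_job_results("07024ee9-7847-4b8a-b260-6c879a2b3cdc", ["output.tiff"], "1.0.0")
# yields an "assets" entry with the href asserted in test_get_job_results_100
# (band metadata such as eo:bands is omitted here for brevity).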
14400 data[var[k]][~Samp_Good_Sonic] =data[var[k]]+'1' data[var[k]][Samp_Good_Sonic] = data[var[k]]+'0' if 'Fc_samples_Tot' in df.columns: # Check if enough samples in Fc column (80%) coverage Samp_Good_IRGA = df['Fc_samples_Tot'].astype(float)>14400 data[var[k]][~Samp_Good_IRGA] = data[var[k]]+'1' data[var[k]][Samp_Good_IRGA] = data[var[k]]+'0' #Door Open Mask if 'door_is_open_Hst' in df.columns: # Check if door open meaning people at the site doing work Door_Closed = df['door_is_open_Hst'].astype(float) == 0 data[var[k]][~Door_Closed] = data[var[k]]+'1' data[var[k]][Door_Closed] = data[var[k]]+'0' Good = Precip & Grade & Door_Closed&~HL&c_sig_strength&w_sig_strength # Create single boolean from all the qc checks; only one fail will trigger fail Good = Good & (Samp_Good_Sonic | Samp_Good_IRGA) else: # If door open is not part of the column set; should be with the logger data Good = Grade &~HL Good = Good & (Samp_Good_Sonic | Samp_Good_IRGA) data[(col[k]+'_Graded')][~Good] = np.NaN # Create column with nan/blank in the column if data is bad/filtered if k == 0: G = Good; if k >0: G = pd.concat([G,Good],axis=1, sort = False) del Good # Delete Good variable for the next round of flux data. return data #%% def METQC(Data, col): # Driver for the met qc function to deal with some column shenanigans if col == 'Flux': # Different columns between the two for some reason; think has to do with the way the tables were constructed in the logger code Met_QC = Met_QAQC(RH=Data['RH_Avg'].astype(float),P=Data['amb_press_Avg'].astype(float), Tair = Data['amb_tmpr_Avg'].astype(float), WS = Data['rslt_wnd_spd'].astype(float), WD = Data['wnd_dir_compass'].astype(float), Precip = Data['Precipitation_Tot'].astype(float), PAR =Data['PAR_density_Avg'].astype(float), Rn = Data['Rn_meas_Avg'].astype(float),VPD = Data['VPD_air'].astype(float),e = Data['e_Avg'].astype(float), e_s = Data['e_sat_Avg'].astype(float),z = 0.777) if col == 'Met': # Met_QC = Met_QAQC(RH=Data['RH_Avg'].astype(float),P=Data['amb_press_Avg'].astype(float), Tair = Data['amb_tmpr_Avg'].astype(float), WS = Data['rslt_wnd_spd'].astype(float), WD = Data['wnd_dir_compass'].astype(float), Precip = Data['Precipitation_Tot'].astype(float), PAR =Data['PAR_density_Avg'].astype(float), Rn = Data['Rn_meas_Avg'].astype(float),VPD = Data['VPD_air'].astype(float),e = Data['e'].astype(float), e_s = Data['e_sat'].astype(float),z = 0.777) if 'Tair_Filtered' in Data.columns: # Checks if the data has already been through the QC code or not; for k in range(0,len(Met_QC.columns)): Data = Data.drop(columns=[Met_QC.columns[k]]) # Drops all columns in the metqc variable before readding them back; the QC occurs over the entire dataframe so will re-addd what was deleted; prevents adding multiple columns to the dataframe with the same header # Not sure why this is the case and this is a quick fix but don't like it Data = pd.concat([Data,Met_QC], axis = 1, sort=False) # Concat the metqc values to the dataframe. 
return Data def Met_QAQC(**kwargs): Q = None if 'Tair' in kwargs.keys(): # Air temperature Tair = pd.DataFrame(kwargs['Tair']) Q = Tair; Q = pd.DataFrame(Q); Q['Tair_Hard_Limit'] = (Q[Tair.columns[0]].astype(float) <= 50) & (Q[Tair.columns[0]].astype(float) >= -40) # Bounds check Q['Tair_Change'] = ~(np.abs(Q[Tair.columns[0]].diff() >= 15)) & (np.abs(Q[Tair.columns[0]].diff() != 0)) # Check if data change between each time step Q['Tair_Day_Change'] = (Tair.resample('D').mean().diff !=0) # Checks if the daily average changes from zero Q['Tair_Filtered'] = Q[Tair.columns[0]][Q['Tair_Hard_Limit'] & Q['Tair_Change'] & Q['Tair_Day_Change']] #Adds filters and booleans together Q.drop(columns=[Tair.columns[0]],inplace=True) # Drops the columns that are filtered out; probably a better way to do this else: print('******Temperature not present******') if 'RH' in kwargs.keys(): RH = pd.DataFrame(kwargs['RH']) if Q is None: Q = RH; Q = pd.DataFrame(Q) else: Q= Q.join(RH) Q['RH_Hard_Limit'] = (Q[RH.columns[0]].astype(float) <= 103) & (Q[RH.columns[0]].astype(float) >= 0) Q['RH_gt_100'] = (Q[RH.columns[0]].astype(float) >= 100) & (Q[RH.columns[0]].astype(float) <= 103) Q['RH_Change'] = (np.abs(Q[RH.columns[0]].astype(float).diff() <= 50)) & (np.abs(Q[RH.columns[0]].diff() != 0)) Q['RH_Day_Change'] = (RH.resample('D').mean().diff !=0) Q['RH_Filtered'] = Q[RH.columns[0]][Q['RH_Hard_Limit']&Q['RH_Change']& Q['RH_Day_Change']] Q['RH_Filtered'] = Q['RH_Filtered'].replace(to_replace=Q['RH_Filtered'][Q['RH_gt_100']], value = 100) # Q['RH_Filtered'][Q['RH_gt_100']]=100 Q.drop(columns=[RH.columns[0]],inplace=True) else: print('**** RH not present ****') if 'P' in kwargs.keys(): # Pressure checks; converts from pressure to MSLP as well; checks between the two P = pd.DataFrame(kwargs['P']); if Q is None: Q = P; Q = pd.DataFrame(Q) else: Q= Q.join(P) Q['P_Hard_Limit'] = (Q[P.columns[0]].astype(float) <= 100) &(Q[P.columns[0]].astype(float) >= 80) Q['P_Change'] = (np.abs(Q[P.columns[0]].diff() <= 3.1)) & (np.abs(Q[P.columns[0]].diff() != 0)) Q['P_Filtered'] = Q[P.columns[0]][Q['P_Hard_Limit'] & Q['P_Change']] if ('Tair' in kwargs.keys()) & ('z' in kwargs.keys()): MSLP = []; H = pd.DataFrame((8.314*(Tair[Tair.columns[0]]+273.15))/(0.029*9.81)/1000) # Scale height x = pd.DataFrame(-kwargs['z']/H[H.columns[0]]); MSLP = P[P.columns[0]]/np.exp(x[x.columns[0]]) # Mean Sea Level Pressure MSLP = pd.DataFrame(MSLP);MSLP = MSLP.rename(columns={MSLP.columns[0]:"MSLP"}) Q= Q.join(MSLP) Q['MSLP_Hard_Limit'] = (Q[MSLP.columns[0]].astype(float) <= 110) &(Q[MSLP.columns[0]].astype(float) >= 80) Q['MSLP_Change'] = (np.abs(Q[MSLP.columns[0]].diff() <= 31)) & (np.abs(Q[MSLP.columns[0]].diff() != 0)) #& (~np.isnan(Q[MSLP.columns[0]].diff())) Q['MSLP_Filtered'] = Q[MSLP.columns[0]][Q['MSLP_Hard_Limit'] & Q['MSLP_Change']] else: print('**** Mean sea level pressure not present ****') Q.drop(columns=[P.columns[0]],inplace=True) else: print('**** Pressure not present ****') if 'WS' in kwargs.keys(): # Wind speed WS = pd.DataFrame(kwargs['WS']) if Q is None: Q = WS; Q = pd.DataFrame(Q) else: Q= Q.join(WS) Q['WS_Hard_Limit'] = (Q[WS.columns[0]].astype(float) < 60) & (Q[WS.columns[0]].astype(float) >= 0) Q['WS_Change'] = (np.abs(Q[WS.columns[0]].diff() <= 15)) & (np.abs(Q[WS.columns[0]].diff() != 0)) #& (~np.isnan(Q[WS.columns[0]].diff())) Q['WS_Day_Change'] = (WS.resample('D').mean().diff !=0) Q['WS_Filtered'] = Q[WS.columns[0]][Q['WS_Hard_Limit']&Q['WS_Change']&Q['WS_Day_Change']] Q.drop(columns=[WS.columns[0]],inplace=True) else: 
print('**** Wind Speed not present ****') if 'WD' in kwargs.keys(): # Wind direction WD = pd.DataFrame(kwargs['WD']) if Q is None: Q = WD; Q = pd.DataFrame(Q) else: Q= Q.join(WD) Q['WD_Hard_Limit'] = (Q[WD.columns[0]].astype(float) < 360) & (Q[WD.columns[0]].astype(float) >= 0) Q['WD_Change'] = (np.abs(Q[WD.columns[0]].diff() != 0)) # (~np.isnan(Q[WD.columns[0]].diff())) & Q['WD_Filtered'] = Q[WD.columns[0]][Q['WD_Hard_Limit']&Q['WD_Change']] Q.drop(columns=[WD.columns[0]],inplace=True) else: print('**** Wind Direction not present ****') if 'PAR' in kwargs.keys(): PAR = pd.DataFrame(kwargs['PAR']); if Q is None: Q = PAR; Q = pd.DataFrame(Q) else: Q= Q.join(PAR) Q['PAR_Hard_Limit'] = (Q[PAR.columns[0]].astype(float) >= 0) & (Q[PAR.columns[0]].astype(float) < 5000) Q['PAR_Change'] = (np.abs(Q[PAR.columns[0]].diff() <= 1500))# & (~np.isnan(Q[PAR.columns[0]].diff())) Q['PAR_Day_Change'] = (PAR.resample('D').mean().diff != 0) # Causing problems for some reason Q['PAR_Filtered'] = Q[PAR.columns[0]][Q['PAR_Hard_Limit']&Q['PAR_Change']&Q['PAR_Day_Change']] Q.drop(columns=[PAR.columns[0]],inplace=True) else: print('**** PAR not present ****') if 'Rn' in kwargs.keys(): Rn = pd.DataFrame(kwargs['Rn']) if Q is None: Q = Rn; Q = pd.DataFrame(Q) else: Q= Q.join(Rn) Q['Rn_Hard_Limit'] = (Q[Rn.columns[0]].astype(float) >= -150) & (Q[Rn.columns[0]].astype(float) <= 1500) Q['Rn_Change'] = (np.abs(Q[Rn.columns[0]].astype(float).diff() <= 500)) & (np.abs(Q[Rn.columns[0]].diff() != 0)) #& (~np.isnan(Q[Rn.columns[0]].astype(float).diff())) Q['Rn_Day_Change'] = (Rn.resample('D').mean().diff !=0) Q['Rn_Filtered'] = Q[Rn.columns[0]][Q['Rn_Hard_Limit']&Q['Rn_Change']&Q['Rn_Day_Change']] Q.drop(columns=[Rn.columns[0]],inplace=True) else: print('**** Net Radiations not present ****') if 'Precip' in kwargs.keys(): # Lot of filters because of the difference of precip is there is or is not RH and check for frozen precip with temperature as the tipping bucket is bad with snow Precip = pd.DataFrame(kwargs['Precip']) if Q is None: Q = P; Q = pd.DataFrame(Q) else: Q= Q.join(Precip) Q['Precip_Hard_Limit'] = (Q[Precip.columns[0]].astype(float) < 100) & (Q[Precip.columns[0]].astype(float) >= 0) Z_Precip = Q[Precip.columns[0]].astype(float) ==0 if ('RH' in kwargs.keys()) & ('Tair' in kwargs.keys()): # Checks for temp and RH in correct ranges. 
Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90) Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0) Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH_gt_90']&~Q['Precip_Tair_lt_Zero']] Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0) Q.drop(columns=[Precip.columns[0]],inplace=True) elif ('RH' in kwargs.keys()) & ('Tair' not in kwargs.keys()): Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90) Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH']] Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0) Q.drop(columns=[Precip.columns[0]],inplace=True) elif ('RH' not in kwargs.keys()) & ('Tair' in kwargs.keys()): Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0) Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']& ~Q['Precip_Tair_lt_Zero']] Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0) Q.drop(columns=[Precip.columns[0]],inplace=True) else: Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']] Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0) Q.drop(columns=[Precip.columns[0]],inplace=True) else: print('**** Precipitation not present ****') if 'VPD' in kwargs.keys(): VPD = pd.DataFrame(kwargs['VPD']) if Q is None: Q = VPD; Q = pd.DataFrame(Q) else: Q= Q.join(VPD) Q['VPD_Hard_Limit'] = (Q[VPD.columns[0]].astype(float) < 50) & (Q[VPD.columns[0]].astype(float) >= 0) Q['VPD_Change'] = (np.abs(Q[VPD.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[VPD.columns[0]].diff() != 0)) Q['VPD_Day_Change'] = (VPD.resample('D').mean().diff !=0) Q['VPD_Filtered'] = Q[VPD.columns[0]][Q['VPD_Hard_Limit']&Q['VPD_Change']&Q['VPD_Day_Change']] Q.drop(columns=[VPD.columns[0]],inplace=True) if 'e' in kwargs.keys(): e = pd.DataFrame(kwargs['e']) if Q is None: Q = e; Q = pd.DataFrame(Q) else: Q= Q.join(e) Q['e_Hard_Limit'] = (Q[e.columns[0]].astype(float) < 50) & (Q[e.columns[0]].astype(float) >= 0) Q['e_Change'] = (np.abs(Q[e.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e.columns[0]].diff() != 0)) Q['e_Day_Change'] = (e.resample('D').mean().diff !=0) Q['e_Filtered'] = Q[e.columns[0]][Q['e_Hard_Limit']&Q['e_Change']&Q['e_Day_Change']] Q.drop(columns=[e.columns[0]],inplace=True) if 'e_s' in kwargs.keys(): e_s = pd.DataFrame(kwargs['e_s']) if Q is None: Q = e_s; Q = pd.DataFrame(Q) else: Q= Q.join(e_s) Q['e_s_Hard_Limit'] = (Q[e_s.columns[0]].astype(float) < 50) & (Q[e_s.columns[0]].astype(float) >= 0) Q['e_s_Change'] = (np.abs(Q[e_s.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e_s.columns[0]].diff() != 0)) Q['e_s_Day_Change'] = (e_s.resample('D').mean().diff !=0) Q['e_s_Filtered'] = Q[e_s.columns[0]][Q['e_s_Hard_Limit']&Q['e_s_Change']&Q['e_s_Day_Change']] Q.drop(columns=[e_s.columns[0]],inplace=True) return Q def get_dtypes(dataset_type): dtypes = {} if dataset_type == "FluxRaw": dtypes = { 'RECORD':'Int64', 'Fc_molar':float, 'Fc_mass':float, 'Fc_qc_grade':'Int64', 'Fc_samples_Tot':'Int64', 'LE':float, 'LE_qc_grade':'Int64', 'LE_samples_Tot':'Int64', 'H':float, 'H_qc_grade':'Int64', 'H_samples_Tot':'Int64', 'Rn':float, 'G_surface':float, 'energy_closure':float, 'Bowen_ratio':float, 'tau':float, 
'tau_qc_grade':'Int64', 'u_star':float, 'T_star':float, 'TKE':float, 'amb_tmpr_Avg':float,
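# --- Generic sketch of the QA/QC pattern used in Met_QAQC above ----------------
# Each meteorological variable goes through the same three checks: a hard
# physical-limit test, a step-change (spike / stuck-sensor) test on the
# point-to-point difference, and a test that the daily mean is not frozen; the
# surviving values are kept in a "<name>_Filtered" column and the rest become
# NaN. The helper below restates that pattern for a single pandas Series with a
# DatetimeIndex; the thresholds are placeholders, since the real limits are
# variable-specific (e.g. -40..50 C and a 15 C step for air temperature).
import pandas as pd

def qc_filter(series, lo, hi, max_step):
    s = series.astype(float)
    out = pd.DataFrame(index=s.index)
    out["Hard_Limit"] = (s >= lo) & (s <= hi)          # plausible physical range
    step = s.diff().abs()
    out["Change"] = (step <= max_step) & (step != 0)   # no spikes, not stuck
    day_ok = s.resample("D").mean().diff() != 0        # daily mean must vary
    out["Day_Change"] = day_ok.reindex(s.index, method="ffill").fillna(True).astype(bool)
    good = out["Hard_Limit"] & out["Change"] & out["Day_Change"]
    out["Filtered"] = s.where(good)                    # NaN wherever a check fails
    return out

# e.g. qc_filter(df["amb_tmpr_Avg"], lo=-40, hi=50, max_step=15)["Filtered"]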
<filename>salt/cloud/clouds/nova.py # -*- coding: utf-8 -*- ''' OpenStack Nova Cloud Module =========================== OpenStack is an open source project that is in use by a number a cloud providers, each of which have their own ways of using it. The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack module for Salt Cloud, which uses a libcloud-based connection. The Nova module is designed to use the nova and glance modules already built into Salt. These modules use the Python novaclient and glanceclient libraries, respectively. In order to use this module, the proper salt configuration must also be in place. This can be specified in the master config, the minion config, a set of grains or a set of pillars. .. code-block:: yaml my_openstack_profile: keystone.user: admin keystone.password: <PASSWORD> keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' Note that there is currently a dependency upon netaddr. This can be installed on Debian-based systems by means of the python-netaddr package. This module currently requires the latest develop branch of Salt to be installed. This module has been tested to work with HP Cloud and Rackspace. See the documentation for specific options for either of these providers. These examples could be set up in the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/openstack.conf``: .. code-block:: yaml my-openstack-config: # The ID of the minion that will execute the salt nova functions auth_minion: myminion # The name of the configuration profile to use on said minion config_profile: my_openstack_profile ssh_key_name: mykey driver: nova userdata_file: /tmp/userdata.txt For local installations that only use private IP address ranges, the following option may be useful. Using the old syntax: Note: For api use, you will need an auth plugin. The base novaclient does not support apikeys, but some providers such as rackspace have extended keystone to accept them .. code-block:: yaml my-openstack-config: # Ignore IP addresses on this network for bootstrap ignore_cidr: 192.168.50.0/24 my-nova: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername password: <PASSWORD> tenant: <userid> driver: nova my-api: identity_url: 'https://identity.api.rackspacecloud.com/v2.0/' compute_region: IAD user: myusername api_key: <api_key> os_auth_plugin: rackspace tenant: <userid> driver: nova networks: - net-id: 47a38ff2-fe21-4800-8604-42bd1848e743 - net-id: 00000000-0000-0000-0000-000000000000 - net-id: 11111111-1111-1111-1111-111111111111 This is an example profile. .. code-block:: yaml debian8-2-iad-cloudqe4: provider: cloudqe4-iad size: performance1-2 image: Debian 8 (Jessie) (PVHVM) script_args: -UP -p python-zmq git 2015.8 and one using cinder volumes already attached .. 
code-block:: yaml # create the block storage device centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 # with the volume already created centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_volume: <volume id> # create the volume from a snapshot centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 snapshot: <cinder snapshot id> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 ephemeral: - size: 100 format: <swap/ext4> # create the create an extra ephemeral disk centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 swap: <size> Block Device can also be used for having more than one block storage device attached .. code-block:: yaml centos7-2-iad-rackspace: provider: rackspace-iad size: general1-2 block_device: - source: image id: <image_id> dest: volume size: 100 shutdown: <preserve/remove> bootindex: 0 - source: blank dest: volume device: xvdc size: 100 shutdown: <preserve/remove> Note: You must include the default net-ids when setting networks or the server will be created without the rest of the interfaces Note: For rackconnect v3, rackconnectv3 needs to be specified with the rackconnect v3 cloud network as its variable. ''' # pylint: disable=E0102 # Import python libs from __future__ import absolute_import import os import logging import socket import pprint import yaml # Import Salt Libs import salt.ext.six as six import salt.utils import salt.client from salt.utils.openstack import nova try: import novaclient.exceptions except ImportError as exc: pass # Import Salt Cloud Libs from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 import salt.utils.cloud import salt.utils.pycrypto as sup import salt.config as config from salt.utils import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) try: from netaddr import all_matching_cidrs HAS_NETADDR = True except ImportError: HAS_NETADDR = False # Get logging started log = logging.getLogger(__name__) request_log = logging.getLogger('requests') __virtualname__ = 'nova' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace script = namespaced_function(script, globals()) reboot = namespaced_function(reboot, globals()) # Only load in this module if the Nova configurations are in place def __virtual__(): ''' Check for Nova configurations ''' request_log.setLevel(getattr(logging, __opts__.get('requests_log_level', 'warning').upper())) if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user', 'tenant', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova(), } return config.check_driver_dependencies( __virtualname__, deps ) def get_conn(): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['user'] kwargs['project_id'] = vm_['tenant'] kwargs['auth_url'] = vm_['identity_url'] kwargs['region_name'] = vm_['compute_region'] if 'password' in vm_: kwargs['password'] = vm_['password'] conn = nova.SaltNova(**kwargs) return conn def avail_locations(conn=None, call=None): ''' Return a list of locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) if conn is None: conn = get_conn() endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints'] ret = {} for endpoint in endpoints: ret[endpoint['region']] = endpoint return ret def get_image(conn, vm_): ''' Return the image object to use ''' vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode( 'ascii', 'salt-cloud-force-ascii' ) if not vm_image: log.debug('No image set, must be boot from volume') return None image_list = conn.image_list() for img in image_list: if vm_image in (image_list[img]['id'], img): return image_list[img]['id'] try: image = conn.image_show(vm_image) return image['id'] except novaclient.exceptions.NotFound as exc: raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found: {1}'.format( vm_image, str(exc) ) ) def get_block_mapping_opts(vm_): ret = {} ret['block_device_mapping'] = config.get_cloud_config_value('block_device_mapping', vm_, __opts__, default={}) ret['block_device'] = config.get_cloud_config_value('block_device', vm_, __opts__, default=[]) ret['ephemeral'] = config.get_cloud_config_value('ephemeral', vm_, __opts__, default=[]) ret['swap'] = config.get_cloud_config_value('swap', vm_, __opts__, default=None) ret['snapshot'] = config.get_cloud_config_value('snapshot', vm_, __opts__, default=None) ret['boot_volume'] = config.get_cloud_config_value('boot_volume', vm_, __opts__, default=None) return ret def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) conn = get_conn() node = conn.show_instance(name).__dict__ salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__) return node def get_size(conn, vm_): ''' Return the VM's size object ''' sizes = conn.list_sizes() vm_size = config.get_cloud_config_value('size', vm_, __opts__) if not vm_size: return sizes[0] for size in sizes: if vm_size and str(vm_size) in (str(sizes[size]['id']), str(size)): return sizes[size]['id'] raise SaltCloudNotFound( 'The specified size, \'{0}\', could not be found.'.format(vm_size) ) def preferred_ip(vm_, ips): ''' Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'. ''' proto = config.get_cloud_config_value( 'protocol', vm_, __opts__, default='ipv4', search_global=False ) family = socket.AF_INET if proto == 'ipv6': family = socket.AF_INET6 for ip in ips: try: socket.inet_pton(family, ip) return ip except Exception: continue return False def ignore_cidr(vm_, ip): ''' Return True if we are to ignore the specified IP. Compatible with IPv4. 
''' if HAS_NETADDR is False: log.error('Error: netaddr is not installed') return 'Error: netaddr is not installed' cidr = config.get_cloud_config_value( 'ignore_cidr', vm_, __opts__, default='', search_global=False ) if cidr != '' and all_matching_cidrs(ip, [cidr]): log.warning( 'IP "{0}" found within "{1}"; ignoring it.'.format(ip, cidr) ) return True return False def ssh_interface(vm_): ''' Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' return config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) def rackconnect(vm_): ''' Determine if we should wait for rackconnect automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'rackconnect', vm_, __opts__, default=False, search_global=False ) def cloudnetwork(vm_): ''' Determine if we should use an extra network to bootstrap Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'cloudnetwork', vm_, __opts__, default='False', search_global=False ) def managedcloud(vm_): ''' Determine if we should wait for the managed cloud automation before running. Either 'False' (default) or 'True'. ''' return config.get_cloud_config_value( 'managedcloud', vm_, __opts__, default=False, search_global=False ) def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) salt.utils.cloud.fire_event( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), {'name': name}, transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = conn.server_by_name(name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to
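# --- Standalone sketch (stdlib only), not the driver's own code ----------------
# ignore_cidr() and preferred_ip() above together decide which of a server's
# addresses to bootstrap over: addresses inside an ignored CIDR are skipped and
# the first address of the preferred family (ipv4 by default) is returned.
# The driver uses netaddr's all_matching_cidrs(); the sketch below approximates
# the same selection with the standard ipaddress module.
import ipaddress

def pick_bootstrap_ip(ips, proto="ipv4", ignore_cidr=""):
    want_v6 = (proto == "ipv6")
    net = ipaddress.ip_network(ignore_cidr, strict=False) if ignore_cidr else None
    for raw in ips:
        try:
            addr = ipaddress.ip_address(raw)
        except ValueError:
            continue                                 # not a valid address literal
        if net is not None and addr.version == net.version and addr in net:
            continue                                 # inside the ignored range
        if (addr.version == 6) == want_v6:
            return raw                               # first address of the wanted family
    return False                                     # mirrors preferred_ip() returning False

# pick_bootstrap_ip(["192.168.50.10", "10.0.0.5"], ignore_cidr="192.168.50.0/24")
# -> "10.0.0.5"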
import re from django.contrib.auth import authenticate from rest_framework import serializers, exceptions from rest_framework.exceptions import ValidationError from .models import User from .social.google_token_validator import GoogleValidate from .social.facebook_token_validator import FacebookValidate from .social.twitter_token_validator import TwitterValidate class RegistrationSerializer(serializers.ModelSerializer): """Serializers registration requests and creates a new user.""" password = serializers.CharField( write_only=True ) """ Overide default varidation to make sure users receive descriptive error messages """ email = serializers.EmailField() username = serializers.CharField() # The client should not be able to send a token along with a registration # request. Making `token` read-only handles that for us. class Meta: model = User # List all of the fields that could possibly be included in a request # or response, including fields specified explicitly above. fields = ['email', 'username', 'password'] def validate_password(self, password): """ User's password should be between 8 to 128 characters and should contain atleas one number and a capital letter """ msg = 'Passwords must be between 8 to 128 characters' if not len(password) > 8: raise serializers.ValidationError(msg) if not len(password) < 128: raise serializers.ValidationError(msg) if re.search('[0-9]', password) is None\ and re.search('[A-Z]', password) is None: raise serializers.ValidationError( 'Password must contain atleast one number and a capital letter' ) return password def validate_email(self, email): email_db = User.objects.filter(email=email) if email_db.exists(): raise serializers.ValidationError( 'A user with that email adress already exists' ) return email def validate_username(self, username): username_db = User.objects.filter(username=username) if username_db.exists(): raise serializers.ValidationError( 'The user name you entered is already taken, try another one' ) msg = 'User names must be between 3 and 10 characters' if not len(username) > 3: raise serializers.ValidationError(msg) if not len(username) < 10: raise serializers.ValidationError(msg) if re.match('^[A-Za-z0-9_]*$', username) is None: raise serializers.ValidationError( 'User names must be characters, letters and underscores only' ) return username def create(self, validated_data): # Use the `create_user` method we wrote earlier to create a new user. return User.objects.create_user(**validated_data) class LoginSerializer(serializers.Serializer): email = serializers.CharField(max_length=255) username = serializers.CharField(max_length=255, read_only=True) password = serializers.CharField(max_length=128, write_only=True) token = serializers.CharField(max_length=255, read_only=True) def validate(self, data): # The `validate` method is where we make sure that the current # instance of `LoginSerializer` has "valid". In the case of logging a # user in, this means validating that they've provided an email # and password and that this combination matches one of the users in # our database. email = data.get('email', None) password = data.get('password', None) # As mentioned above, an email is required. Raise an exception if an # email is not provided. if email is None: raise serializers.ValidationError( 'An email address is required to log in.' ) # As mentioned above, a password is required. Raise an exception if a # password is not provided. if password is None: raise serializers.ValidationError( 'A password is required to log in.' 
) # The `authenticate` method is provided by Django and handles checking # for a user that matches this email/password combination. Notice how # we pass `email` as the `username` value. Remember that, in our User # model, we set `USERNAME_FIELD` as `email`. user = authenticate(username=email, password=password) # If no user was found matching this email/password combination then # `authenticate` will return `None`. Raise an exception in this case. if user is None: raise serializers.ValidationError( 'A user with this email and password was not found.' ) if user.is_superuser: user.is_verified = True user.save() if user.is_verified == False: raise serializers.ValidationError( 'Your email is not verified, Please check your email for a verification link' ) # Django provides a flag on our `User` model called `is_active`. The # purpose of this flag to tell us whether the user has been banned # or otherwise deactivated. This will almost never be the case, but # it is worth checking for. Raise an exception in this case. if not user.is_active: raise serializers.ValidationError( 'This user has been deactivated.' ) # The `validate` method should return a dictionary of validated data. # This is the data that is passed to the `create` and `update` methods # that we will see later on. return { 'email': user.email, 'username': user.username, 'token': user.token } class UserSerializer(serializers.ModelSerializer): """Handles serialization and deserialization of User objects.""" # Passwords must be at least 8 characters, but no more than 128 # characters. These values are the default provided by Django. We could # change them, but that would create extra work while introducing no real # benefit, so let's just stick with the defaults. password = serializers.CharField( max_length=128, min_length=8, write_only=True ) class Meta: model = User fields = ('email', 'username', 'password', 'token') # The `read_only_fields` option is an alternative for explicitly # specifying the field with `read_only=True` like we did for password # above. The reason we want to use `read_only_fields` here is because # we don't need to specify anything else about the field. For the # password field, we needed to specify the `min_length` and # `max_length` properties too, but that isn't the case for the token # field. def update(self, instance, validated_data): """Performs an update on a User.""" # Passwords should not be handled with `setattr`, unlike other fields. # This is because Django provides a function that handles hashing and # salting passwords, which is important for security. What that means # here is that we need to remove the password field from the # `validated_data` dictionary before iterating over it. password = validated_data.pop('password', None) for (key, value) in validated_data.items(): # For the keys remaining in `validated_data`, we will set them on # the current `User` instance one at a time. setattr(instance, key, value) if password is not None: # `.set_password()` is the method mentioned above. It handles all # of the security stuff that we shouldn't be concerned with. instance.set_password(password) # Finally, after everything has been updated, we must explicitly save # the model. It's worth pointing out that `.set_password()` does not # save the model. 
instance.save() return instance class PasswordResetSerializer(serializers.Serializer): """ serializer for requesting a password reset via email """ email = serializers.EmailField(max_length=255) def validate_email(self, email): """ check if the email entered has a corresponding user """ user = User.objects.filter(email=email).first() if not user: raise serializers.ValidationError( 'User with this email is not found' ) return email class PasswordResetConfirmSerializer(serializers.Serializer): """ serializer for requesting a new password """ password = serializers.CharField( max_length=128, min_length=8 ) confirm_password = serializers.CharField( max_length=128, min_length=8 ) class GoogleAuthSerializer(serializers.ModelSerializer): """ Serializer for the google social login/user creation """ auth_token = serializers.CharField() class Meta: model = User fields = ['auth_token'] def validate_auth_token(self, auth_token): """ Validate auth_token, decode the auth_token, retrieve user info """ decoded_google_user_data = GoogleValidate.validate_google_token( auth_token) if decoded_google_user_data is None: raise serializers.ValidationError( 'Invalid token please try again' ) if 'sub' not in decoded_google_user_data: raise serializers.ValidationError( 'Token is not valid or has expired. Please get a new one.' ) user = User.objects.filter( social_id=decoded_google_user_data.get('sub')) if not user.exists(): user_obj = { 'social_id': decoded_google_user_data.get('sub'), 'username': decoded_google_user_data.get('name', decoded_google_user_data['email']), 'email': decoded_google_user_data.get('email'), 'password': '<PASSWORD>' } try: User.objects.create_user(**user_obj) except: raise serializers.ValidationError( 'Failed to register the user. Email already exists in the database') authenticated_user = User.objects.get( social_id=decoded_google_user_data.get('sub')) return authenticated_user.token() class FacebookAuthSerializer(serializers.ModelSerializer): """ Serializer for the facebook social login/user creation """ auth_token = serializers.CharField() class Meta: model = User fields = ['auth_token'] def validate_auth_token(self, auth_token): """ Validate auth_token, decode the auth_token, retrieve user info """ facebook_user_data = FacebookValidate.validate_facebook_token( auth_token) if facebook_user_data is None: raise serializers.ValidationError( 'Invalid token please try again' ) if 'id' not in facebook_user_data: raise serializers.ValidationError( 'Token is not valid or has expired. Please get a new one.' ) user = User.objects.filter(social_id=facebook_user_data.get('id')) if not user.exists(): user_obj = { 'social_id': facebook_user_data.get('id'), 'username': facebook_user_data.get('name', facebook_user_data.get('email')), 'email': facebook_user_data.get('email'), 'password': '<PASSWORD>' } try: User.objects.create_user(**user_obj) except: raise serializers.ValidationError( 'Failed to register the user. 
Email already exists in the database') authenticated_user = User.objects.get( social_id=facebook_user_data.get('id')) return authenticated_user.token() class TwitterAuthSerializer(serializers.ModelSerializer): """ Serializer for the twitter social login/user creation """ auth_token = serializers.CharField() class Meta: model = User fields = ['auth_token'] def validate_auth_token(self, auth_token): twitter_user_data = TwitterValidate.validate_twitter_token( auth_token) if twitter_user_data is None: raise serializers.ValidationError( 'Invalid token please try again' ) if 'id_str' not in twitter_user_data:
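# --- Standalone sketch of the documented password rule -------------------------
# RegistrationSerializer.validate_password above enforces "between 8 and 128
# characters, with at least one number and a capital letter". The regex below
# states the same rule as a single pattern (helper name is illustrative; the
# serializer itself applies the length and character checks separately):
import re

PASSWORD_RE = re.compile(r"^(?=.*\d)(?=.*[A-Z]).{8,128}$")

def is_valid_password(password):
    return PASSWORD_RE.fullmatch(password) is not None

# is_valid_password("Secret12")  -> True
# is_valid_password("secret123") -> False  (no capital letter)
# is_valid_password("Abc1")      -> False  (shorter than 8 characters)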
self.show_quota_left is None: return self.event.settings.show_quota_left return self.show_quota_left def tax(self, price=None, base_price_is='auto', currency=None, invoice_address=None, override_tax_rate=None, include_bundled=False): price = price if price is not None else self.default_price if not self.tax_rule: t = TaxedPrice(gross=price, net=price, tax=Decimal('0.00'), rate=Decimal('0.00'), name='') else: t = self.tax_rule.tax(price, base_price_is=base_price_is, invoice_address=invoice_address, override_tax_rate=override_tax_rate, currency=currency or self.event.currency) if include_bundled: for b in self.bundles.all(): if b.designated_price and b.bundled_item.tax_rule_id != self.tax_rule_id: if b.bundled_variation: bprice = b.bundled_variation.tax(b.designated_price * b.count, base_price_is='gross', invoice_address=invoice_address, currency=currency) else: bprice = b.bundled_item.tax(b.designated_price * b.count, invoice_address=invoice_address, base_price_is='gross', currency=currency) compare_price = self.tax_rule.tax(b.designated_price * b.count, override_tax_rate=override_tax_rate, invoice_address=invoice_address, currency=currency) t.net += bprice.net - compare_price.net t.tax += bprice.tax - compare_price.tax t.name = "MIXED!" return t def is_available_by_time(self, now_dt: datetime=None) -> bool: now_dt = now_dt or now() if self.available_from and self.available_from > now_dt: return False if self.available_until and self.available_until < now_dt: return False return True def is_available(self, now_dt: datetime=None) -> bool: """ Returns whether this item is available according to its ``active`` flag and its ``available_from`` and ``available_until`` fields """ now_dt = now_dt or now() if not self.active or not self.is_available_by_time(now_dt): return False return True def _get_quotas(self, ignored_quotas=None, subevent=None): check_quotas = set(getattr( self, '_subevent_quotas', # Utilize cache in product list self.quotas.filter(subevent=subevent).select_related('subevent') if subevent else self.quotas.all() )) if ignored_quotas: check_quotas -= set(ignored_quotas) return check_quotas def check_quotas(self, ignored_quotas=None, count_waitinglist=True, subevent=None, _cache=None, include_bundled=False, trust_parameters=False, fail_on_no_quotas=False): """ This method is used to determine whether this Item is currently available for sale. :param ignored_quotas: If a collection if quota objects is given here, those quotas will be ignored in the calculation. If this leads to no quotas being checked at all, this method will return unlimited availability. :param include_bundled: Also take availability of bundled items into consideration. :param trust_parameters: Disable checking of the subevent parameter and disable checking if any variations exist (performance optimization). :returns: any of the return codes of :py:meth:`Quota.availability()`. :raises ValueError: if you call this on an item which has variations associated with it. Please use the method on the ItemVariation object you are interested in. 
""" if not trust_parameters and not subevent and self.event.has_subevents: raise TypeError('You need to supply a subevent.') check_quotas = self._get_quotas(ignored_quotas=ignored_quotas, subevent=subevent) quotacounter = Counter() res = Quota.AVAILABILITY_OK, None for q in check_quotas: quotacounter[q] += 1 if include_bundled: for b in self.bundles.all(): bundled_check_quotas = (b.bundled_variation or b.bundled_item)._get_quotas(ignored_quotas=ignored_quotas, subevent=subevent) if not bundled_check_quotas: return Quota.AVAILABILITY_GONE, 0 for q in bundled_check_quotas: quotacounter[q] += b.count for q, n in quotacounter.items(): if n == 0: continue a = q.availability(count_waitinglist=count_waitinglist, _cache=_cache) if a[1] is None: continue num_avail = a[1] // n code_avail = Quota.AVAILABILITY_GONE if a[1] >= 1 and num_avail < 1 else a[0] # this is not entirely accurate, as it shows "sold out" even if it is actually just "reserved", # since we do not know that distinction here if at least one item is available. However, this # is only relevant in connection with bundles. if code_avail < res[0] or res[1] is None or num_avail < res[1]: res = (code_avail, num_avail) if len(quotacounter) == 0: if fail_on_no_quotas: return Quota.AVAILABILITY_GONE, 0 return Quota.AVAILABILITY_OK, sys.maxsize # backwards compatibility return res def allow_delete(self): from pretix.base.models.orders import OrderPosition return not OrderPosition.all.filter(item=self).exists() @property def includes_mixed_tax_rate(self): for b in self.bundles.all(): if b.designated_price and b.bundled_item.tax_rule_id != self.tax_rule_id: return True return False @cached_property def has_variations(self): return self.variations.exists() @staticmethod def clean_per_order(min_per_order, max_per_order): if min_per_order is not None and max_per_order is not None: if min_per_order > max_per_order: raise ValidationError(_('The maximum number per order can not be lower than the minimum number per ' 'order.')) @staticmethod def clean_category(category, event): if category is not None and category.event is not None and category.event != event: raise ValidationError(_('The item\'s category must belong to the same event as the item.')) @staticmethod def clean_tax_rule(tax_rule, event): if tax_rule is not None and tax_rule.event is not None and tax_rule.event != event: raise ValidationError(_('The item\'s tax rule must belong to the same event as the item.')) @staticmethod def clean_available(from_date, until_date): if from_date is not None and until_date is not None: if from_date > until_date: raise ValidationError(_('The item\'s availability cannot end before it starts.')) @property def meta_data(self): data = {p.name: p.default for p in self.event.item_meta_properties.all()} if hasattr(self, 'meta_values_cached'): data.update({v.property.name: v.value for v in self.meta_values_cached}) else: data.update({v.property.name: v.value for v in self.meta_values.select_related('property').all()}) return OrderedDict((k, v) for k, v in sorted(data.items(), key=lambda k: k[0])) class ItemVariation(models.Model): """ A variation of a product. For example, if your item is 'T-Shirt' then an example for a variation would be 'T-Shirt XL'. :param item: The item this variation belongs to :type item: Item :param value: A string defining this variation :type value: str :param description: A short description :type description: str :param active: Whether this variation is being sold. 
:type active: bool :param default_price: This variation's default price :type default_price: decimal.Decimal :param original_price: The item's "original" price. Will not be used for any calculations, will just be shown. :type original_price: decimal.Decimal """ item = models.ForeignKey( Item, related_name='variations', on_delete=models.CASCADE ) value = I18nCharField( max_length=255, verbose_name=_('Description') ) active = models.BooleanField( default=True, verbose_name=_("Active"), ) description = I18nTextField( verbose_name=_("Description"), help_text=_("This is shown below the variation name in lists."), null=True, blank=True, ) position = models.PositiveIntegerField( default=0, verbose_name=_("Position") ) default_price = models.DecimalField( decimal_places=2, max_digits=7, null=True, blank=True, verbose_name=_("Default price"), ) original_price = models.DecimalField( verbose_name=_('Original price'), blank=True, null=True, max_digits=7, decimal_places=2, help_text=_('If set, this will be displayed next to the current price to show that the current price is a ' 'discounted one. This is just a cosmetic setting and will not actually impact pricing.') ) require_membership = models.BooleanField( verbose_name=_('Require a valid membership'), default=False, ) require_membership_types = models.ManyToManyField( 'MembershipType', verbose_name=_('Membership types'), blank=True, ) objects = ScopedManager(organizer='item__event__organizer') class Meta: verbose_name = _("Product variation") verbose_name_plural = _("Product variations") ordering = ("position", "id") def __str__(self): return str(self.value) @property def price(self): return self.default_price if self.default_price is not None else self.item.default_price def tax(self, price=None, base_price_is='auto', currency=None, include_bundled=False, override_tax_rate=None, invoice_address=None): price = price if price is not None else self.price if not self.item.tax_rule: t = TaxedPrice(gross=price, net=price, tax=Decimal('0.00'), rate=Decimal('0.00'), name='') else: t = self.item.tax_rule.tax(price, base_price_is=base_price_is, currency=currency, override_tax_rate=override_tax_rate, invoice_address=invoice_address) if include_bundled: for b in self.item.bundles.all(): if b.designated_price and b.bundled_item.tax_rule_id != self.item.tax_rule_id: if b.bundled_variation: bprice = b.bundled_variation.tax(b.designated_price * b.count, base_price_is='gross', currency=currency, invoice_address=invoice_address) else: bprice = b.bundled_item.tax(b.designated_price * b.count, base_price_is='gross', currency=currency, invoice_address=invoice_address) compare_price = self.item.tax_rule.tax(b.designated_price * b.count, base_price_is='gross', currency=currency, invoice_address=invoice_address) t.net += bprice.net - compare_price.net t.tax += bprice.tax - compare_price.tax t.name = "MIXED!" 
return t def delete(self, *args, **kwargs): self.vouchers.update(item=None, variation=None, quota=None) super().delete(*args, **kwargs) if self.item: self.item.event.cache.clear() def save(self, *args, **kwargs): super().save(*args, **kwargs) if self.item: self.item.event.cache.clear() def _get_quotas(self, ignored_quotas=None, subevent=None): check_quotas = set(getattr( self, '_subevent_quotas', # Utilize cache in product list self.quotas.filter(subevent=subevent).select_related('subevent') if subevent else self.quotas.all() )) if ignored_quotas: check_quotas -= set(ignored_quotas) return check_quotas def check_quotas(self, ignored_quotas=None, count_waitinglist=True, subevent=None, _cache=None, include_bundled=False, trust_parameters=False, fail_on_no_quotas=False) -> Tuple[int, int]: """ This method is used to determine whether this ItemVariation is currently available for sale in terms of quotas. :param ignored_quotas: If a collection if quota objects is given here, those quotas will be ignored in the calculation. If this leads to no quotas being checked at all, this method will return unlimited availability. :param count_waitinglist: If ``False``, waiting list entries will be ignored for quota calculation. :returns: any of the return codes of :py:meth:`Quota.availability()`. """ if not trust_parameters and not subevent and self.item.event.has_subevents: # NOQA raise TypeError('You need to supply a subevent.') check_quotas = self._get_quotas(ignored_quotas=ignored_quotas, subevent=subevent) quotacounter = Counter() res = Quota.AVAILABILITY_OK, None for q in check_quotas: quotacounter[q] += 1 if include_bundled: for b in self.item.bundles.all(): bundled_check_quotas = (b.bundled_variation or b.bundled_item)._get_quotas(ignored_quotas=ignored_quotas, subevent=subevent) if not bundled_check_quotas: return Quota.AVAILABILITY_GONE, 0 for q in bundled_check_quotas: quotacounter[q] += b.count for q, n in quotacounter.items(): a = q.availability(count_waitinglist=count_waitinglist, _cache=_cache) if a[1] is None: continue num_avail = a[1] // n code_avail = Quota.AVAILABILITY_GONE if a[1] >= 1 and num_avail < 1 else a[0] # this is not entirely accurate, as it shows "sold out" even if it is actually just "reserved", # since we do not know that distinction here if at least one item is available. However, this # is
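The check_quotas() logic above counts how many units of each quota one sale would consume (including bundled products), divides each quota's remaining capacity by that count, and keeps the most restrictive result. Below is a minimal standalone sketch of that aggregation step, assuming nothing from pretix: the function name, the integer availability codes and the plain-dict inputs are all illustrative.

# Sketch only: aggregate availability across quotas, where one sale may
# consume several units of the same quota (e.g. through a bundle).
from collections import Counter

AVAILABILITY_OK = 100     # illustrative stand-ins for Quota.AVAILABILITY_* codes
AVAILABILITY_GONE = 0

def aggregate_availability(quota_demands, quota_sizes_left):
    """quota_demands: Counter mapping quota name -> units needed per sale.
    quota_sizes_left: dict mapping quota name -> units left (None = unlimited).
    Returns (code, number_of_sales_still_possible); None means unlimited."""
    result_code, result_num = AVAILABILITY_OK, None
    for quota, needed in quota_demands.items():
        left = quota_sizes_left[quota]
        if left is None:                      # unlimited quota adds no constraint
            continue
        sales_possible = left // needed       # whole sales this quota still allows
        code = AVAILABILITY_OK if sales_possible >= 1 else AVAILABILITY_GONE
        # Keep the most restrictive quota seen so far.
        if result_num is None or sales_possible < result_num or code < result_code:
            result_code, result_num = code, sales_possible
    return result_code, result_num

# The main ticket needs quota "A" once and, via a bundle, quota "B" twice.
demands = Counter({"A": 1, "B": 2})
print(aggregate_availability(demands, {"A": 10, "B": 3}))   # -> (100, 1)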
import dill from munch import Munch import numpy as np import os from .core_record import CoreRecord from .utils import load_test, read_pred_file, tokenize, \ sequence_tasks, DataShape, is_1d_list, is_2d_list, ACCUMULATED_STR, \ TOKENIZATION_DICT from .tests import AbstractTest, BasicClassificationMetrics, BasicSeqMetrics from typing import Dict, List from enum import Enum from collections import namedtuple, defaultdict import logging from copy import deepcopy logger = logging.getLogger(__name__) # EXAMPLE OF A METADATA: # {'TEMPLATE': 'As a child, @person@ had big dreams.', 'DOMAIN': 'personal', # 'CLASS': 'neut', 'EXTENDABLE': '', 'CORPUS': '', 'IDENTITY_KEY': 'person', # 0: 'white', 1: 'other_race', 'SAMPLE': {'scientists': 'phonetician', # 'pos_adj_event': 'wonderful', 'neg_adj_event': 'frightening', 'neg_v_2b_vbd': # 'despised', 'other_race': 'Anna, who is latino', 'white': 'Anna, who is # white', 'neg_adj_personstate': 'horrified', 'neg_anger_adj_personstate': # 'llivid', 'kiritchenko_emotional_state': 'disappointed', 'pos_v_2b_vbd': # 'loved', 'kiritchenko_emotional_situation': 'depressing'}, 'SAMPLE_n': 0} CacheEntry = namedtuple( 'CacheEntry', ['data_filter', 'data_shape', 'prob_based', 'drop_none_labels', 'flatten_method']) class EvaluationCore(): def __init__( self, data, labels_dict, meta=None, name=None, capability=None, description=None, tests=None, non_matched_data=False, group_names=None ): """ A class which is used to store the data, metadata and labels. It can also load model's predictions for the data it holds and 'tie' it to the data. It is equivalent to the checklist test, modulo all test-specific functionality (running specific tests, getting results etc.) and reuses a lot of original checklist code. This class is a result of decoupling the data and prediction management from getting the results for a specific type of test. TODO: subsampling doesn't work for now. Parameters ---------- data : list List or list(lists) of whatever the model takes as input. Strings, tuples, etc. labels_dict : a dictionary mapping a name of the task to: a Munch with nclasses field and labels field: a single value (int, str, etc) or list If list, must be the same length as data meta : list metadata for examples, must be the same length as data name : str test name capability : str test capability description : str test description non_matched_data: bool Indicates whether the data is 'counterfactual': many different version of the sentence, one for each group (True) or 'non-matched' : each group has its own set of sentences and labels. group_names: List[str] Provide when non_matched_data is True. """ self.data = data self.labels_dict = labels_dict self.labels = None if non_matched_data: self.data_structure = DataShape.GROUPED self.group_names = group_names else: self.data_structure = DataShape.UNGROUPED self.group_names = None self.meta = meta self.run_idxs = None self.result_indexes = None self.name = name self.capability = capability self.description = description self.tests = tests self.label_vocab = None self.results = None # cache of core records processed in different ways self.core_record_cache = {} def set_tests(self, tests): """ Set the tests/metrics that should be used for evaluation. 
""" self.tests = tests def save(self, file): with open(file, 'wb') as f: dill.dump(self, f, recurse=True) @staticmethod def from_file(file): return load_test(file) def example_list_and_indices(self, n=None, seed=None): """Subsamples test cases Parameters ---------- n : int Number of testcases to sample seed : int Seed to use Returns ------- tuple(list, list) First list is a list of examples Second list maps examples to testcases. For example, let's say we have two testcases: [a, b, c] and [d, e]. The first list will be [a, b, c, d, e] the second list will be [0, 0, 0, 1, 1] Also updates self.run_idxs if n is not None to indicate which testcases were run. Also updates self.result_indexes with the second list. """ if seed is not None: np.random.seed(seed) self.run_idxs = None idxs = list(range(len(self.data))) if n is not None: idxs = np.random.choice(idxs, min(n, len(idxs)), replace=False) self.run_idxs = idxs if is_1d_list(self.data[0]): all_data = [ (i, y, m) for i in idxs for (y, m) in zip(self.data[i], self.meta[i])] result_indexes, examples, meta = map(list, list(zip(*all_data))) # e.g. for each tempalte there are many groups and each group # can be represented with many terms elif is_2d_list(self.data[0]): all_data = [] for i in idxs: example_data = self.data[i] example_meta = self.meta[i] for group_idx, sent_list in enumerate(example_data): for y in sent_list: all_data.append(((i, group_idx), y, example_meta)) result_indexes, examples, meta = map(list, list(zip(*all_data))) else: examples = [self.data[i] for i in idxs] meta = [self.meta[i] for i in idxs] result_indexes = idxs # list(range(len(self.data))) self.result_indexes = result_indexes return examples, meta, result_indexes # def recover_example_list_and_indices(self): # """Recovers a previously computed example_list_and_indices""" # idxs = list(range(len(self.data))) # if self.run_idxs is not None: # idxs = self.run_idxs # if is_1d_list(self.data[0]): # examples = [y for i in idxs for y in self.data[i]] # meta = [y for i in idxs for y in self.meta[i]] # elif is_2d_list(self.data[0]): # pass # # TODO # else: # examples = [self.data[i] for i in idxs] # result_indexes = self.result_indexes # return examples, result_indexes def update_results_from_preds(self, preds, confs): """Updates results from preds and confs Assumes that example_lists_and_indices or to_raw_examples or to_raw_file was called before, so that self.result_indexes exists Parameters ---------- preds : list Predictions confs : list Confidences Updates self.results.preds and self.results.confs """ result_indexes = self.result_indexes if is_1d_list(self.data[0]): self.results.preds = [[] for _ in self.data] self.results.confs = [[] for _ in self.data] for i, p, c in zip(result_indexes, preds, confs): self.results.preds[i].append(p) self.results.confs[i].append(c) elif is_2d_list(self.data[0]): self.results.preds = [[[] for _ in x] for x in self.data] self.results.confs = [[[] for _ in x] for x in self.data] for (i, j), p, c in zip(result_indexes, preds, confs): self.results.preds[i][j].append(p) self.results.confs[i][j].append(c) else: self.results.preds = [None for _ in self.data] self.results.confs = [None for _ in self.data] for i, p, c in zip(result_indexes, preds, confs): self.results.preds[i] = p self.results.confs[i] = c def to_raw_examples( self, file_format=None, format_fn=None, n=None, seed=None, new_sample=True ): """Flattens all test examples into a single list Parameters ---------- file_format : string, must be one of 'jsonl', 'tsv', or None None just 
calls str(x) for each example in self.data format_fn : function or None If not None, call this function to format each example in self.data n : int If not None, number of samples to draw seed : int Seed to use if n is not None new_sample: bool If False, will rely on a previous sample and ignore the 'n' and 'seed' parameters Returns ------- list(string) List of all examples. Indices of example to test case will be stored in self.result_indexes. If n is not None, self.run_idxs will store the test case indexes. """ if file_format == 'jsonl': import json format_fn = lambda x, m: json.dumps(x) elif file_format == 'tsv': format_fn = lambda x, m: '\t'.join(x).replace('\n', ' ') else: if format_fn is None: format_fn = lambda x, m: str(x).replace('\n', ' ') if new_sample: examples, meta, indices =\ self.example_list_and_indices(n, seed=seed) else: raise Exception('Only new samples are supported.') # examples, indices = self.recover_example_list_and_indices() examples = [format_fn(x, m) for x, m in zip(examples, meta)] return examples def to_raw_file( self, path, task, file_format=None, format_fn=str, header=None, n=None, seed=None ): """Flatten test cases into individual examples and print them to file. Indices of example to test case will be stored in self.result_indexes. If n is not None, self.run_idxs will store the test case indexes. Parameters ---------- path : string File path file_format : string, must be one of 'jsonl', 'tsv', or None None just calls str(x) for each example in self.data format_fn : function or None If not None, call this function to format each example in self.data header : string If not None, first line of file n : int If not None, number of samples to draw seed : int Seed to use if n is not None """ # file_format can be jsonl, TODO # format_fn takes an example and outputs a line(s) in the file ret = '' if header is not None: ret += header.strip('\n') + '\n' if task in sequence_tasks: if format_fn is not None: logger.warning("Replacing given format_fn with a tokenizer.") # if the data is pre-tokenized and this is recorded in the # metadata, use that tokenization (this ensures the per-token # labels match the predictions from the model) format_fn =\ lambda x, m: "\n".join(m[TOKENIZATION_DICT][x]) + "\n" \ if TOKENIZATION_DICT in m \ else "\n".join(tokenize(x)) + "\n" examples = self.to_raw_examples( file_format=file_format, format_fn=format_fn, n=n, seed=seed) ret +=
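example_list_and_indices() and update_results_from_preds() above implement a flatten/scatter pattern: nested test cases are flattened into one example list for the model, and an index list records which test case each example came from so predictions can be mapped back. A toy sketch of that pattern follows; the function names are illustrative and not part of the class.

def flatten(test_cases):
    """Flatten [[a, b, c], [d, e]] into ['a'..'e'] plus owner indices [0,0,0,1,1]."""
    examples, owner = [], []
    for i, case in enumerate(test_cases):
        for sentence in case:
            examples.append(sentence)
            owner.append(i)
    return examples, owner

def scatter(owner, preds, n_cases):
    """Group per-example predictions back into their original test cases."""
    grouped = [[] for _ in range(n_cases)]
    for i, p in zip(owner, preds):
        grouped[i].append(p)
    return grouped

cases = [["a", "b", "c"], ["d", "e"]]
examples, owner = flatten(cases)           # ['a','b','c','d','e'], [0,0,0,1,1]
preds = [1, 0, 1, 0, 0]                    # one model prediction per example
print(scatter(owner, preds, len(cases)))   # [[1, 0, 1], [0, 0]]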
as separator ta = t.split('-') # Make sure there are no singletons or private use flags for tg in ta: if len(tg) < 2: return False # If we got here, tag checks out return True # Check whether the given parameter is a string that is a case-sensitive # match for one of the valid category names. # # Parameters: # # cname : str | mixed - the value to check # # Return: # # True if value is a string that is recognized, False otherwise # def valid_category(cname): if not isinstance(cname, str): return False if (cname == 'language') or (cname == 'extlang') or \ (cname =='script') or (cname == 'region') or \ (cname == 'variant') or (cname == 'grandfathered') or \ (cname == 'redundant'): return True else: return False # Check whether the given string value has leading or trailing padding. # # This returns True if the first or last character is a space, tab, or # line break. Otherwise, it returns False. Empty strings return False. # Non-strings cause an exception. # # Parameters: # # s : str - the string value to check # # Return: # # True if value is padded, False if not # def has_padding(s): # Check parameter if not isinstance(s, str): raise LogicError() # Empty strings return False if len(s) < 1: return False # Check first and last character for x in range(0, 2): # Get appropriate character c = None if x == 0: c = s[0] elif x == 1: c = s[-1] else: raise LogicError() # shouldn't happen # Check that character is not space, tab, or line break if (c == ' ') or (c == '\t') or (c == '\n'): return True # If we got here, string is not padded return False # Check that a parsed record conforms to various expectations. # # Exceptions are thrown if there are problems with the record. The # LogicError exception is used for situations that should never be # possible from any input data. 
# # Parameters: # # lnum : int - a line number, greater than zero, at which the record # starts, which is used for error reporting # # f : dict - maps lowercased record field names to their values # def check_record(lnum, f): # Check parameters if not isinstance(lnum, int): raise LogicError() if lnum < 1: raise LogicError() if not isinstance(f, dict): raise LogicError() # Main check of all keys and values in dictionary for k in list(f): # Each key must be a string if not isinstance(k, str): raise LogicError() # Each key should be non-empty if len(k) < 1: raise EmptyFieldName(lnum) # Each key must be non-padded if has_padding(k): raise LogicError() # Each key must be only in lowercase and have at least one lowercase # letter if not k.islower(): raise InvalidFieldName(lnum, k) # Each value must be a string without padding, except that # "description" "comments" and "prefix" field values must be # non-empty lists of strings without padding val = f[k] if (k == 'description') or (k == 'comments') or (k == 'prefix'): # Value must be non-empty list of strings without padding if not isinstance(val, list): raise LogicError() if len(val) < 1: raise LogicError() for e in val: if not isinstance(e, str): raise LogicError() if has_padding(e): raise LogicError() else: # Value must be string without padding if not isinstance(val, str): raise LogicError() if has_padding(val): raise LogicError() # All records must have a "type" field that is one of the recognized # categories if 'type' not in f: raise MissingTypeError(lnum) if not valid_category(f['type']): raise BadRecordType(lnum, f['type']) # Grandfathered or redundant records must have a "tag" field but not a # "subtag" field, while all other records must have a "subtag" field # but not a "tag" field if (f['type'] == 'grandfathered') or (f['type'] == 'redundant'): # Must have tag field but not subtag if ('tag' not in f) or ('subtag' in f): raise WrongTagTypeError(lnum) else: # Must have subtag field but not tag if ('subtag' not in f) or ('tag' in f): raise WrongTagTypeError(lnum) # If this is a subtag record, check the subtag value format if 'subtag' in f: ft = f['type'] sv = f['subtag'] if ft == 'language': # Languages must be two or three lowercase ASCII letters (language # tags that are longer are not used in practice); the only # exception is 8-character language ranges where the first three # chars are lowercase letters, the last three chars are lowercase # letters, and the middle two chars are ".." if ((len(sv) < 2) or (len(sv) > 3)) and (len(sv) != 8): raise BadLanguageSubtag(lnum, sv) if len(sv) == 8: if (not is_lower_letter(sv[0])) or \ (not is_lower_letter(sv[1])) or \ (not is_lower_letter(sv[2])) or \ (sv[3] != '.') or (sv[4] != '.') or \ (not is_lower_letter(sv[5])) or \ (not is_lower_letter(sv[6])) or \ (not is_lower_letter(sv[7])): raise BadLanguageSubtag(lnum, sv) else: for c in sv: if not is_lower_letter(c): raise BadLanguageSubtag(lnum, sv) elif ft == 'extlang': # extlang subtags must be three lowercase ASCII letters if len(sv) != 3: raise BadExtlangSubtag(lnum, sv) for c in sv: if not is_lower_letter(c): raise BadExtlangSubtag(lnum, sv) elif ft == 'script': # Script subtags must be four ASCII letters, the first of which is # uppercase and the rest of which are lowercase; the only # exception is 10-character script subtag ranges, where the first # four letters are a valid script tag, the last four letters are a # valid script subtag, and the middle two characters are ".." 
if len(sv) == 4: if (not is_upper_letter(sv[0])) or \ (not is_lower_letter(sv[1])) or \ (not is_lower_letter(sv[2])) or \ (not is_lower_letter(sv[3])): raise BadScriptSubtag(lnum, sv) elif len(sv) == 10: if (not is_upper_letter(sv[0])) or \ (not is_lower_letter(sv[1])) or \ (not is_lower_letter(sv[2])) or \ (not is_lower_letter(sv[3])) or \ (sv[4] != '.') or (sv[5] != '.') or \ (not is_upper_letter(sv[6])) or \ (not is_lower_letter(sv[7])) or \ (not is_lower_letter(sv[8])) or \ (not is_lower_letter(sv[9])): raise BadScriptSubtag(lnum, sv) else: raise BadScriptSubtag(lnum, sv) elif ft == 'region': # Region subtags must be two uppercase ASCII letters or three # ASCII digits or they must be a range if len(sv) == 2: if (not is_upper_letter(sv[0])) or (not is_upper_letter(sv[1])): raise BadRegionSubtag(lnum, sv) elif len(sv) == 3: for c in sv: if not is_digit(c): raise BadRegionSubtag(lnum, sv) elif len(sv) == 6: if (not is_upper_letter(sv[0])) or \ (not is_upper_letter(sv[1])) or \ (sv[2] != '.') or (sv[3] != '.') or \ (not is_upper_letter(sv[4])) or \ (not is_upper_letter(sv[5])): raise BadRegionSubtag(lnum, sv) else: raise BadRegionSubtag(lnum, sv) elif ft == 'variant': # Variants must either be four lowercase ASCII alphanumerics and # begin with a digit, or 5-8 lowercase ASCII alphanumerics if (len(sv) < 4) or (len(sv) > 8): raise BadVariantSubtag(lnum, sv) if len(sv) == 4: if not is_digit(sv[0]): raise BadVariantSubtag(lnum, sv) for c in sv: if (not is_lower_letter(c)) and (not is_digit(c)): raise BadVariantSubtag(lnum, sv) else: raise LogicError() # shouldn't happen # If this is a tag record, check tag format if 'tag' in f: if not is_format_tag(f['tag']): raise BadTagFormat(lnum, f['tag']) # If this record has prefixes, additional checks if 'prefix' in f: # Prefixes only possible on extlang and variant records if (f['type'] != 'extlang') and (f['type'] != 'variant'): raise PrefixContextError(lnum) # If this is an extlang record, no more than one prefix allowed if f['type'] == 'extlang': if len(f['prefix']) > 1: raise PrefixMultiError(lnum) # All prefix values must be two or three lowercase letters for # extlang prefixes if f['type'] == 'extlang': for p in f['prefix']: if (len(p) < 2) or (len(p) > 3): raise BadPrefix(lnum, p) for c in p: if not is_lower_letter(c): raise BadPrefix(lnum, p) # All prefix values must be core tags for variant records if f['type'] == 'variant': for p in f['prefix']: if not is_core_tag(p): raise BadPrefix(lnum, p) # If this
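check_record() above enforces a different lexical shape per record category (language, extlang, script, region, variant). The following standalone sketch shows two of those checks with plain character comparisons instead of the module's own is_upper_letter()/is_digit() helpers; it is illustrative only and skips the range forms ("..") handled above.

def valid_region_subtag(sv: str) -> bool:
    """Region subtags: two uppercase ASCII letters or three ASCII digits."""
    if len(sv) == 2:
        return all('A' <= c <= 'Z' for c in sv)
    if len(sv) == 3:
        return all('0' <= c <= '9' for c in sv)
    return False

def valid_extlang_subtag(sv: str) -> bool:
    """Extlang subtags: exactly three lowercase ASCII letters."""
    return len(sv) == 3 and all('a' <= c <= 'z' for c in sv)

assert valid_region_subtag("DE") and valid_region_subtag("419")
assert not valid_region_subtag("de")
assert valid_extlang_subtag("yue") and not valid_extlang_subtag("Y")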
''' BreezySLAM: Simple, efficient SLAM in Python algorithms.py: SLAM algorithms Copyright (C) 2014 <NAME> This code is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this code. If not, see <http:#www.gnu.org/licenses/>. ''' import pybreezyslam import math import time # Basic params _DEFAULT_MAP_QUALITY = 2 # out of 255 _DEFAULT_HOLE_WIDTH_MM = 320 # Random mutation hill-climbing (RMHC) params _DEFAULT_SIGMA_XY_MM = 100 _DEFAULT_SIGMA_THETA_DEGREES = 20 _DEFAULT_MAX_SEARCH_ITER = 900 # CoreSLAM class ------------------------------------------------------------------------------------------------------ class CoreSLAM(object): ''' CoreSLAM is an abstract class that uses the classes Position, Map, Scan, and Laser to run variants of the simple CoreSLAM (tinySLAM) algorithm described in @inproceedings{coreslam-2010, author = {<NAME> and <NAME>}, title = {CoreSLAM: a SLAM Algorithm in less than 200 lines of C code}, booktitle = {11th International Conference on Control, Automation, Robotics and Vision, ICARCV 2010, Singapore, 7-10 December 2010, Proceedings}, pages = {1975-1979}, publisher = {IEEE}, year = {2010} } Implementing classes should provide the method _updateMapAndPointcloud(scan_mm, dxy_mm, dtheta_degrees, should_update_map) to update the point-cloud (particle cloud) and map (if should_update_map true) ''' def __init__(self, laser, map_size_pixels, map_size_meters, map_quality=_DEFAULT_MAP_QUALITY, hole_width_mm=_DEFAULT_HOLE_WIDTH_MM): ''' Creates a CoreSLAM object suitable for updating with new Lidar and odometry data. laser is a Laser object representing the specifications of your Lidar unit map_size_pixels is the size of the square map in pixels map_size_meters is the size of the square map in meters quality from 0 through 255 determines integration speed of scan into map hole_width_mm determines width of obstacles (walls) ''' # Initialize parameters self.map_quality = map_quality self.hole_width_mm = hole_width_mm # Store laser for later self.laser = laser # Initialize a scan for computing distance to map, and one for updating map self.scan_for_distance = pybreezyslam.Scan(laser, 1) self.scan_for_mapbuild = pybreezyslam.Scan(laser, 3) # Initialize the map self.map = pybreezyslam.Map(map_size_pixels, map_size_meters) def update(self, scans_mm, pose_change, scan_angles_degrees=None, should_update_map=True): ''' Updates the scan and odometry, and calls the the implementing class's _updateMapAndPointcloud method with the specified pose change. 
scan_mm is a list of Lidar scan values, whose count is specified in the scan_size attribute of the Laser object passed to the CoreSlam constructor pose_change is a tuple (dxy_mm, dtheta_degrees, dt_seconds) computed from odometry scan_angles_degrees is an optional list of angles corresponding to the distances in scans_mm should_update_map flags for whether you want to update the map ''' # Convert pose change (dxy,dtheta,dt) to velocities (dxy/dt, dtheta/dt) for scan update velocity_factor = (1 / pose_change[2]) if (pose_change[2] > 0) else 0 # units => units/sec dxy_mm_dt = pose_change[0] * velocity_factor dtheta_degrees_dt = pose_change[1] * velocity_factor velocities = (dxy_mm_dt, dtheta_degrees_dt) # Build a scan for computing distance to map, and one for updating map self._scan_update(self.scan_for_mapbuild, scans_mm, velocities, scan_angles_degrees) self._scan_update(self.scan_for_distance, scans_mm, velocities, scan_angles_degrees) # Implementing class updates map and pointcloud self._updateMapAndPointcloud(pose_change[0], pose_change[1], should_update_map) def getmap(self, mapbytes): ''' Fills bytearray mapbytes with current map pixels, where bytearray length is square of map size passed to CoreSLAM.__init__(). ''' self.map.get(mapbytes) def setmap(self, mapbytes): ''' Sets current map pixels to values in bytearray, where bytearray length is square of map size passed to CoreSLAM.__init__(). ''' self.map.set(mapbytes) def __str__(self): return 'CoreSLAM: %s \n map quality = %d / 255 \n hole width = %7.0f mm' % \ (str(self.map), self.map_quality, self.hole_width_mm) def __repr__(self): return self.__str__() def _scan_update(self, scan, scans_distances_mm, velocities, scan_angles_degrees): scan.update(scans_mm=scans_distances_mm, hole_width_mm=self.hole_width_mm, velocities=velocities, scan_angles_degrees=scan_angles_degrees) # SinglePositionSLAM class --------------------------------------------------------------------------------------------- class SinglePositionSLAM(CoreSLAM): ''' SinglePositionSLAM is an abstract class that implements CoreSLAM using a point-cloud with a single point (position). Implementing classes should provide the method _getNewPosition(self, start_position) to compute a new position based on searching from a starting position. ''' def __init__(self, laser, map_size_pixels, map_size_meters, map_quality=_DEFAULT_MAP_QUALITY, hole_width_mm=_DEFAULT_HOLE_WIDTH_MM): CoreSLAM.__init__(self, laser, map_size_pixels, map_size_meters, map_quality, hole_width_mm) # Initialize the position (x, y, theta) init_coord_mm = 500 * map_size_meters # center of map self.position = pybreezyslam.Position(init_coord_mm, init_coord_mm, 0) def _updateMapAndPointcloud(self, dxy_mm, dtheta_degrees, should_update_map): ''' Updates the map and point-cloud (particle cloud). Called automatically by CoreSLAM.update() velocities is a tuple of the form (dxy_mm, dtheta_degrees, dt_seconds). 
''' # Start at current position start_pos = self.position.copy() # Add effect of velocities start_pos.x_mm += dxy_mm * self._costheta() start_pos.y_mm += dxy_mm * self._sintheta() start_pos.theta_degrees += dtheta_degrees # Add offset from laser start_pos.x_mm += self.laser.offset_mm * self._costheta() start_pos.y_mm += self.laser.offset_mm * self._sintheta() # Get new position from implementing class new_position = self._getNewPosition(start_pos) # Update the current position with this new position, adjusted by laser offset self.position = new_position.copy() self.position.x_mm -= self.laser.offset_mm * self._costheta() self.position.y_mm -= self.laser.offset_mm * self._sintheta() # Update the map with this new position if indicated if should_update_map: self.map.update(self.scan_for_mapbuild, new_position, self.map_quality, self.hole_width_mm) def getpos(self): ''' Returns current position as a tuple (x_mm, y_mm, theta_degrees) ''' return (self.position.x_mm, self.position.y_mm, self.position.theta_degrees) def _costheta(self): return math.cos(self._thetaradians()) def _sintheta(self): return math.sin(self._thetaradians()) def _thetaradians(self): return math.radians(self.position.theta_degrees) # RMHC_SLAM class ------------------------------------------------------------------------------------------------------ class RMHC_SLAM(SinglePositionSLAM): ''' RMHC_SLAM implements the _getNewPosition() method of SinglePositionSLAM using Random-Mutation Hill-Climbing search. Uses its own internal pseudorandom-number generator for efficiency. ''' def __init__(self, laser, map_size_pixels, map_size_meters, map_quality=_DEFAULT_MAP_QUALITY, hole_width_mm=_DEFAULT_HOLE_WIDTH_MM, random_seed=None, sigma_xy_mm=_DEFAULT_SIGMA_XY_MM, sigma_theta_degrees=_DEFAULT_SIGMA_THETA_DEGREES, max_search_iter=_DEFAULT_MAX_SEARCH_ITER): ''' Creates a RMHCSlam object suitable for updating with new Lidar and odometry data. laser is a Laser object representing the specifications of your Lidar unit map_size_pixels is the size of the square map in pixels map_size_meters is the size of the square map in meters quality from 0 through 255 determines integration speed of scan into map hole_width_mm determines width of obstacles (walls) random_seed supports reproducible results; defaults to system time if unspecified sigma_xy_mm specifies the standard deviation in millimeters of the normal distribution of the (X,Y) component of position for RMHC search sigma_theta_degrees specifies the standard deviation in degrees of the normal distribution of the rotational component of position for RMHC search max_search_iter specifies the maximum number of iterations for RMHC search ''' SinglePositionSLAM.__init__(self, laser, map_size_pixels, map_size_meters, map_quality, hole_width_mm) if not random_seed: random_seed = int(time.time()) & 0xFFFF self.randomizer = pybreezyslam.Randomizer(random_seed) self.sigma_xy_mm = sigma_xy_mm self.sigma_theta_degrees = sigma_theta_degrees self.max_search_iter = max_search_iter def update(self, scans_mm, pose_change=None, scan_angles_degrees=None, should_update_map=True): if not pose_change: pose_change = (0, 0, 0) CoreSLAM.update(self, scans_mm, pose_change, scan_angles_degrees, should_update_map) def _getNewPosition(self, start_position): ''' Implements the _getNewPosition() method of SinglePositionSLAM. Uses Random-Mutation Hill-Climbing search to look for a better position based on a starting position. 
''' # RMHC search is implemented as a C extension for efficiency return pybreezyslam.rmhcPositionSearch( start_position, self.map, self.scan_for_distance, self.laser, self.sigma_xy_mm, self.sigma_theta_degrees, self.max_search_iter, self.randomizer) def _random_normal(self, mu, sigma): return mu + self.randomizer.rnor() * sigma # Deterministic_SLAM class ------------------------------------------------------------------------------------ class Deterministic_SLAM(SinglePositionSLAM): ''' Deterministic_SLAM implements the _getNewPosition() method of SinglePositionSLAM by simply copying the search-start position. ''' def __init__(self, laser, map_size_pixels, map_size_meters, map_quality=_DEFAULT_MAP_QUALITY, hole_width_mm=_DEFAULT_HOLE_WIDTH_MM): ''' Creates a Deterministic_Slam object suitable for updating with new Lidar and odometry data. laser is a Laser object representing the specifications of your Lidar unit map_size_pixels is the size of the square map in pixels map_size_meters is the size of the square map in meters quality from 0 through 255 determines integration speed of scan into map hole_width_mm determines width of obstacles (walls) ''' SinglePositionSLAM.__init__(self, laser, map_size_pixels, map_size_meters, map_quality, hole_width_mm) def _getNewPosition(self, start_position): ''' Implements the _getNewPosition() method of SinglePositionSLAM. Returns a copy
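The RMHC position search used by RMHC_SLAM lives in the pybreezyslam C extension, so its inner loop is not visible here. The toy, pure-Python sketch below shows the random-mutation hill-climbing idea on a 2-D cost function; every name in it is illustrative and unrelated to the real extension.

import random

def rmhc(cost, start, sigma=1.0, max_iter=1000, rng=random.Random(0)):
    """Random-mutation hill climbing: mutate the best candidate with Gaussian
    noise and keep the mutation only if it lowers the cost."""
    best, best_cost = start, cost(start)
    for _ in range(max_iter):
        candidate = (best[0] + rng.gauss(0, sigma),
                     best[1] + rng.gauss(0, sigma))
        c = cost(candidate)
        if c < best_cost:
            best, best_cost = candidate, c
    return best, best_cost

# Toy cost: squared distance from an optimum at (3, -2), unknown to the search.
pos, score = rmhc(lambda p: (p[0] - 3) ** 2 + (p[1] + 2) ** 2, start=(0.0, 0.0))
print(pos, score)   # converges near (3, -2)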
[195, 195, 102, 102, 60, 60, 24, 24], 57732: [24, 24, 60, 60, 102, 102, 195, 195], 57733: [0, 0, 0, 16, 0, 0, 0, 0], 57734: [0, 63, 63, 63, 63, 63, 63, 0], 57735: [0, 252, 252, 252, 252, 252, 252, 0], 57736: [0, 84, 42, 84, 42, 84, 42, 0], 57737: [0, 0, 24, 60, 60, 126, 126, 126], 57738: [126, 126, 126, 60, 60, 24, 0, 0], 57739: [7, 31, 63, 127, 127, 255, 255, 255], 57740: [224, 248, 252, 254, 254, 255, 255, 255], 57741: [255, 255, 255, 254, 254, 252, 248, 224], 57742: [255, 255, 255, 127, 127, 63, 31, 7], 57743: [36, 66, 129, 0, 0, 129, 66, 36], 57744: [0, 0, 2, 0, 0, 0, 32, 0], 57745: [32, 0, 2, 0, 32, 0, 2, 0], 57746: [34, 0, 136, 0, 34, 0, 136, 0], 57747: [136, 0, 85, 0, 34, 0, 85, 0], 57748: [255, 187, 255, 238, 255, 187, 255, 238], 57749: [191, 255, 251, 255, 191, 255, 251, 255], 57750: [255, 255, 223, 255, 255, 255, 253, 255], 57751: [170, 0, 170, 0, 170, 0, 170, 0], 57752: [0, 68, 0, 0, 0, 68, 0, 0], 57753: [204, 204, 51, 51, 204, 204, 51, 51], 57754: [51, 102, 204, 153, 51, 102, 204, 153], 57755: [204, 102, 51, 153, 204, 102, 51, 153], 59392: [192, 192, 192, 255, 255, 192, 192, 192], 59393: [255, 255, 24, 24, 24, 24, 24, 24], 59394: [3, 3, 3, 255, 255, 3, 3, 3], 59395: [24, 24, 24, 24, 24, 24, 255, 255], 59396: [255, 255, 192, 192, 0, 0, 0, 0], 59397: [240, 240, 192, 192, 192, 192, 192, 192], 59398: [255, 255, 3, 3, 0, 0, 0, 0], 59399: [15, 15, 3, 3, 3, 3, 3, 3], 59400: [0, 0, 0, 0, 3, 3, 255, 255], 59401: [3, 3, 3, 3, 3, 3, 15, 15], 59402: [192, 192, 192, 192, 192, 192, 255, 255], 59403: [192, 192, 192, 255, 255, 0, 0, 0], 59404: [0, 0, 0, 255, 255, 192, 192, 192], 59405: [31, 31, 24, 24, 24, 24, 24, 24], 59406: [248, 248, 24, 24, 24, 24, 24, 24], 59407: [0, 0, 0, 255, 255, 3, 3, 3], 59408: [3, 3, 3, 255, 255, 0, 0, 0], 59409: [24, 24, 24, 24, 24, 24, 248, 248], 59410: [24, 24, 24, 24, 24, 24, 31, 31], 59411: [192, 112, 28, 6, 6, 3, 3, 1], 59412: [128, 192, 192, 96, 96, 56, 14, 3], 59413: [3, 14, 56, 96, 96, 192, 192, 128], 59414: [1, 3, 3, 6, 6, 28, 112, 192], 59415: [24, 48, 96, 192, 128, 0, 0, 0], 59416: [24, 12, 6, 3, 1, 0, 0, 0], 59417: [24, 60, 102, 195, 129, 0, 0, 0], 59418: [0, 0, 0, 1, 3, 6, 12, 24], 59419: [24, 48, 96, 193, 131, 6, 12, 24], 59420: [24, 12, 6, 3, 3, 6, 12, 24], 59421: [24, 60, 102, 195, 131, 6, 12, 24], 59422: [0, 0, 0, 128, 192, 96, 48, 24], 59423: [0, 0, 0, 129, 195, 102, 60, 24], 59424: [24, 48, 96, 193, 195, 102, 60, 24], 59425: [24, 12, 6, 131, 195, 102, 60, 24], 59426: [192, 96, 48, 24, 0, 0, 0, 0], 59427: [3, 6, 12, 24, 0, 0, 0, 0], 59428: [195, 102, 60, 24, 0, 0, 0, 0], 59429: [0, 0, 0, 0, 12, 6, 3, 1], 59430: [192, 96, 48, 24, 12, 6, 3, 1], 59431: [3, 6, 12, 24, 12, 6, 3, 1], 59432: [195, 102, 60, 24, 12, 6, 3, 1], 59433: [0, 0, 0, 0, 48, 96, 192, 128], 59434: [0, 0, 0, 0, 60, 102, 195, 129], 59435: [192, 96, 48, 24, 60, 102, 195, 129], 59436: [3, 6, 12, 24, 60, 102, 195, 129], 59437: [192, 96, 48, 248, 248, 96, 192, 128], 59438: [24, 24, 24, 248, 252, 6, 3, 1], 59439: [24, 24, 24, 24, 60, 102, 195, 129], 59440: [27, 30, 28, 31, 31, 0, 0, 0], 59441: [3, 6, 12, 31, 31, 6, 3, 1], 59442: [192, 96, 48, 31, 31, 24, 24, 24], 59443: [195, 102, 60, 24, 24, 24, 24, 24], 59444: [0, 0, 0, 248, 248, 120, 216, 152], 59445: [192, 96, 48, 248, 248, 0, 0, 0], 59446: [0, 0, 0, 248, 248, 96, 192, 128], 59447: [24, 24, 24, 24, 48, 96, 192, 128], 59448: [24, 24, 24, 24, 12, 6, 3, 1], 59449: [0, 0, 0, 31, 31, 6, 3, 1], 59450: [3, 6, 12, 31, 31, 0, 0, 0], 59451: [3, 6, 12, 24, 24, 24, 24, 24], 59452: [192, 96, 48, 24, 24, 24, 24, 24], 59453: [2, 32, 1, 8, 64, 4, 128, 
16], 59454: [9, 32, 4, 128, 17, 64, 8, 2], 59455: [136, 2, 64, 0, 136, 65, 0, 145], 59456: [204, 51, 204, 51, 204, 51, 204, 51], 59457: [0, 255, 0, 0, 0, 255, 0, 0], 59458: [68, 68, 68, 68, 68, 68, 68, 68], 59459: [0, 85, 0, 68, 0, 85, 0, 68], 59460: [4, 4, 4, 4, 4, 4, 255, 4], 59461: [65, 128, 65, 34, 20, 8, 20, 34], 59462: [1, 1, 255, 16, 16, 16, 255, 1], 59463: [65, 162, 85, 162, 65, 128, 0, 128], 59464: [8, 8, 170, 65, 128, 128, 170, 20], 59465: [227, 227, 221, 38, 62, 62, 221, 98], 59466: [136, 80, 34, 0, 170, 0, 34, 5], 59467: [64, 0, 68, 0, 64, 0, 85, 0], 59468: [128, 0, 8, 4, 2, 0, 32, 64], 59469: [170, 255, 2, 4, 8, 16, 32, 84], 59470: [54, 48, 3, 99, 108, 12, 192, 198], 59471: [34, 34, 34, 65, 128, 8, 20, 34], 59472: [34, 65, 128, 128, 128, 65, 34, 156], 60416: [6, 9, 144, 96, 6, 9, 144, 96], 60417: [78, 114, 243, 63, 39, 228, 252, 207], 60418: [165, 66, 165, 0, 0, 165, 66, 165], 60419: [0, 82, 52, 6, 96, 44, 74, 0], 60420: [0, 16, 44, 58, 92, 52, 4, 0], 60421: [82, 68, 45, 196, 17, 180, 35, 74], 60422: [145, 82, 0, 3, 192, 0, 74, 137], 60423: [24, 24, 60, 126, 255, 219, 24, 60], 60424: [60, 24, 219, 255, 126, 60, 24, 24], 60425: [12, 28, 57, 255, 255, 57, 28, 12], 60426: [186, 238, 170, 56, 56, 186, 254, 186], 60427: [34, 99, 247, 183, 255, 126, 60, 60], 60428: [60, 126, 255, 191, 255, 126, 60, 60], 60429: [60, 60, 24, 60, 60, 60, 60, 24], 60430: [24, 60, 60, 60, 60, 24, 60, 60], 60431: [0, 0, 123, 255, 255, 123, 0, 0], 60432: [0, 0, 222, 255, 255, 222, 0, 0], 60433: [32, 96, 32, 32, 48, 40, 60, 60], 60434: [0, 48, 88, 253, 255, 121, 48, 0], 60435: [0, 12, 26, 191, 255, 158, 12, 0], 60436: [0, 48, 88, 253, 63, 249, 48, 0], 60437: [0, 12, 26, 191, 252, 159, 12, 0], 60438: [16, 40, 104, 188, 252, 120, 16, 56], 60439: [0, 0, 252, 28, 127, 99, 62, 0], 60440: [56, 56, 146, 124, 16, 40, 40, 40], 60441: [56, 56, 16, 254, 16, 40, 68, 130], 60442: [56, 56, 18, 124, 144, 40, 36, 34], 60443: [56, 56, 144, 124, 18, 40, 72, 136], 60444: [8, 20, 8, 28, 42, 20, 62, 20], 60445: [129, 195, 231, 255, 255, 153, 36, 195], 60446: [65, 162, 60, 90, 126, 255, 66, 99], 60447: [130, 69, 60, 90, 126, 255, 66, 198], 60448: [0, 90, 189, 153, 36, 66, 36, 0], 60449: [129, 165, 90, 24, 24, 36, 195, 0], 60450: [60, 90, 255, 171, 213, 255, 221, 137], 60451: [60, 90, 255, 171, 213, 255, 119, 34], 60452: [60, 66,
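Each entry in the table above maps a code point to eight row bytes of an 8x8 one-bit glyph. A small sketch that renders one such row list as ASCII art, assuming the most significant bit is the leftmost pixel (the rendering function is illustrative and not part of the original font data):

def render_glyph(rows, on='#', off='.'):
    """rows: list of 8 integers, one byte per pixel row, MSB = leftmost pixel."""
    lines = []
    for byte in rows:
        lines.append(''.join(on if byte & (0x80 >> bit) else off
                             for bit in range(8)))
    return '\n'.join(lines)

# Code point 57737 in the table above is a filled upward-pointing triangle.
print(render_glyph([0, 0, 24, 60, 60, 126, 126, 126]))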
#!/usr/bin/env python # coding:utf-8 import os import sys import time import signal import getpass import logging import argparse import threading from . import dag from .job import * from .config import load_config, print_config from .qsub import myQueue, QsubError from .sge import ParseSingal from .version import __version__ from .utils import * from .cluster import * from datetime import datetime from threading import Thread from subprocess import Popen, call, PIPE from collections import Counter class RunSge(object): def __init__(self, sgefile, queue, cpu, mem, name, start, end, logdir, workdir, maxjob, strict=False, mode=None, config=None): self.sgefile = ShellFile(sgefile, mode=mode, name=name, logdir=logdir, workdir=workdir) self.jfile = self.sgefile._path self.jobs = self.sgefile.jobshells(start=start, end=end) self.totaljobdict = {j.jobname: j for j in self.jobs} self.queue = queue self.mem = mem self.cpu = cpu self.maxjob = maxjob self.logdir = logdir self.is_run = False self.strict = strict self.localprocess = {} self.cloudjob = {} self.conf = config self.jobsgraph = dag.DAG() pre_dep = [] dep = [] for jb in self.jobs[:]: if jb.rawstring == "wait" and len(dep): self.jobs.remove(jb) pre_dep = dep dep = [] else: if jb.rawstring == "wait": # dup "wait" line self.jobs.remove(jb) continue self.jobsgraph.add_node_if_not_exists(jb.jobname) dep.append(jb) for i in pre_dep: self.jobsgraph.add_node_if_not_exists(i.jobname) self.jobsgraph.add_edge(i.jobname, jb.jobname) if self.conf.get("args", "call_back"): cmd = self.conf.get("args", "call_back") name = "call_back" call_back_job = ShellJob(self.sgefile, linenum=-1, cmd=cmd) call_back_job.forceToLocal(jobname=name, removelog=True) self.jobsgraph.add_node_if_not_exists(call_back_job.jobname) self.jobs.append(call_back_job) self.totaljobdict["call_back"] = call_back_job for i in self.jobsgraph.all_nodes: if i == name: continue self.jobsgraph.add_edge(i, name) if self.conf.get("args", "init"): cmd = self.conf.get("args", "init") name = "init" init_job = ShellJob(self.sgefile, linenum=-1, cmd=cmd) init_job.forceToLocal(jobname=name, removelog=True) self.jobsgraph.add_node_if_not_exists(init_job.jobname) self.jobs.append(init_job) self.totaljobdict[name] = init_job for i in self.jobsgraph.all_nodes: if i == name: continue self.jobsgraph.add_edge(name, i) self.logger.info("Total jobs to submit: %s" % ", ".join([j.name for j in self.jobs])) self.logger.info("All logs can be found in %s directory", self.logdir) self.has_success = set() for job in self.jobs[:]: lf = job.logfile job.subtimes = 0 if os.path.isfile(lf): js = self.jobstatus(job) if js != "success": os.remove(lf) job.status = "wait" else: self.jobsgraph.delete_node_if_exists(job.jobname) self.has_success.add(job.jobname) self.jobs.remove(job) else: job.status = "wait" if self.maxjob is None: self.maxjob = len(self.jobs) self.jobqueue = myQueue(maxsize=max(self.maxjob, 1)) self.conf.jobqueue = self.jobqueue self.conf.logger = self.logger self.conf.cloudjob = self.cloudjob def jobstatus(self, job): jobname = job.jobname status = job.status logfile = job.logfile if self.is_run and job.host == "batchcompute": if jobname in self.cloudjob: jobid = self.cloudjob[jobname] try: j = job.client.get_job(jobid) sta = j.State except ClientError as e: # delete by another process, status Failed self.logger.debug("Job %s not Exists", jobid) self.cloudjob.pop(jobname) sta = "Failed" if sta == "Running": status = "run" elif sta == "Finished": status = "success" elif sta == "Failed": status = "error" elif 
sta == "Stopped": status = "stop" elif sta == "Waiting": status = "wait" self.logger.debug("job %s status %s", jobid, status) else: if os.path.isfile(logfile): try: with os.popen('tail -n 1 %s' % logfile) as fi: sta = fi.read().split()[-1] except IndexError: if status not in ["submit", "resubmit"]: status = "run" sta = status if sta == "SUCCESS": status = "success" elif sta == "ERROR": status = "error" elif sta == "Exiting.": status = "exit" else: with os.popen("sed -n '3p' %s" % logfile) as fi: if "RUNNING..." in fi.read(): status = "run" if status != job.status and self.is_run: self.logger.info("job %s status %s", jobname, status) job.status = status if job.host == "batchcompute": with open(logfile, "a") as fo: fo.write("[%s] %s\n" % ( datetime.today().strftime("%F %X"), job.status.upper())) return status def jobcheck(self): if self.sgefile.mode == "batchcompute": rate_limiter = RateLimiter(max_calls=3, period=3) else: rate_limiter = RateLimiter(max_calls=3, period=2) while True: with rate_limiter: for jb in self.jobqueue.queue: with rate_limiter: try: js = self.jobstatus(jb) except: continue if js == "success": if jb.jobname in self.localprocess: self.localprocess[jb.jobname].wait() self.jobqueue.get(jb) self.jobsgraph.delete_node_if_exists(jb.jobname) elif js == "error": if jb.jobname in self.localprocess: self.localprocess[jb.jobname].wait() self.jobqueue.get(jb) if jb.subtimes >= self.times + 1: if self.strict: self.throw("Error jobs return(submit %d times, error), exist!, %s" % (jb.subtimes, os.path.join( self.logdir, jb.logfile))) # if error, exit program self.jobsgraph.delete_node_if_exists( jb.jobname) else: self.submit(jb) elif js == "exit": if self.strict: self.throw("Error when submit") def submit(self, job): if not self.is_run or job.status in ["run", "submit", "resubmit", "success"]: return logfile = job.logfile self.jobqueue.put(job, block=True, timeout=1080000) with open(logfile, "a") as logcmd: if job.subtimes == 0: logcmd.write(job.rawstring+"\n") job.status = "submit" elif job.subtimes > 0: logcmd.write("\n" + job.rawstring+"\n") job.status = "resubmit" self.logger.info("job %s status %s", job.name, job.status) logcmd.write("[%s] " % datetime.today().strftime("%F %X")) logcmd.flush() if job.host is not None and job.host == "localhost": cmd = "echo 'Your job (\"%s\") has been submitted in localhost' && " % job.name + job.cmd if job.subtimes > 0: cmd = cmd.replace("RUNNING", "RUNNING (re-submit)") time.sleep(self.resubivs) p = Popen(cmd, shell=True, stdout=logcmd, stderr=logcmd) self.localprocess[job.name] = p elif job.host == "sge": jobcpu = job.cpu if job.cpu else self.cpu jobmem = job.mem if job.mem else self.mem cmd = 'echo "%s" | qsub -q %s -wd %s -N %s -o %s -j y -l vf=%dg,p=%d' % ( job.cmd, " -q ".join(self.queue), self.sgefile.workdir, job.jobname, logfile, jobmem, jobcpu) if job.subtimes > 0: cmd = cmd.replace("RUNNING", "RUNNING (re-submit)") time.sleep(self.resubivs) call(cmd, shell=True, stdout=logcmd, stderr=logcmd) elif job.host == "batchcompute": jobcpu = job.cpu if job.cpu else self.cpu jobmem = job.mem if job.mem else self.mem c = Cluster(config=self.conf) c.AddClusterMount() task = Task(c) task.AddOneTask( job=job, outdir=self.conf.get("args", "outdir")) task.Submit() info = "Your job (%s) has been submitted in batchcompute (%s) %d times\n" % ( task.name, task.id, job.subtimes+1) logcmd.write(info) self.cloudjob[task.name] = task.id self.logger.debug("%s job submit %s times", job.name, job.subtimes) job.subtimes += 1 def run(self, sec=2, times=3, 
resubivs=2): self.is_run = True self.times = max(0, times) self.resubivs = max(resubivs, 0) for jn in self.has_success: self.logger.info("job %s status already success", jn) if len(self.jobsgraph.graph) == 0: return p = Thread(target=self.jobcheck) p.setDaemon(True) p.start() if self.sgefile.mode == "batchcompute": access_key_id = self.conf.get("args", "access_key_id") access_key_secret = self.conf.get("args", "access_key_secret") if access_key_id is None: access_key_id = self.conf.get("OSS", "access_key_id") if access_key_secret is None: access_key_secret = self.conf.get("OSS", "access_key_secret") region = REGION.get(self.conf.get("args", "region"), CN_BEIJING) client = Client(region, access_key_id, access_key_secret) quotas = client.get_quotas().AvailableClusterInstanceType cfg_path = os.path.join(os.path.dirname(__file__), "ins_type.json") with open(cfg_path) as fi: self.conf.it_conf = json.load(fi) availableTypes = [i for i in quotas if i in self.conf.it_conf] self.conf.availableTypes = sorted(availableTypes, key=lambda x: ( self.conf.it_conf[x]["cpu"], self.conf.it_conf[x]["memory"])) self.conf.client = client while True: subjobs = self.jobsgraph.ind_nodes() if len(subjobs) == 0: break for j in subjobs: jb = self.totaljobdict[j] if jb in self.jobqueue.queue: continue self.submit(jb) time.sleep(sec) @property def logger(self): return logging.getLogger(__name__) def throw(self, msg): user = getpass.getuser() if threading.current_thread().__class__.__name__ == '_MainThread': raise QsubError(msg) else: if self.sgefile.mode == "sge": self.logger.info(msg) call('qdel "%s*"' % self.sgefile.name, shell=True, stderr=PIPE, stdout=PIPE) os._exit(signal.SIGTERM) elif self.sgefile.mode == "batchcompute": for jb in self.jobqueue.queue: jobname = jb.name try: jobid = self.conf.cloudjob.get(jobname, "") j = self.conf.client.get_job(jobid) except ClientError as e: if e.status == 404: self.logger.info("Invalid JobId %s", jobid) continue except: continue if j.Name.startswith(user): if j.State not in ["Stopped", "Failed", "Finished"]: self.conf.client.stop_job(jobid) self.conf.client.delete_job(jobid) self.logger.info("Delete job %s success", j.Name) self.jobqueue.get(jb) else: self.logger.info( "Delete job error, you have no assess with job %s", j.Name) def writestates(self, outstat): summary = {j.name: j.status for j in self.jobs} with open(outstat, "w") as fo: fo.write(str(dict(Counter(summary.values()))) + "\n\n") sumout = {} for k, v in summary.items(): sumout.setdefault(v, []).append(k) for k, v in sorted(sumout.items()): fo.write( k + " : " + ", ".join(sorted(v, key=lambda x: (len(x), x))) + "\n") def parserArg(): pid = os.getpid() parser = argparse.ArgumentParser( description="For multi-run your shell scripts localhost or qsub.") parser.add_argument("-q", "--queue", type=str, help="the queue your job running, default: all.q", default=["all.q", ], nargs="*", metavar="<queue>") parser.add_argument("-m", "--memory", type=int, help="the memory used per command (GB), default: 1", default=1, metavar="<int>") parser.add_argument("-c", "--cpu", type=int, help="the cpu numbers you job used, default: 1", default=1, metavar="<int>") parser.add_argument("-wd", "--workdir", type=str, help="work dir, default: %s" % os.path.abspath(os.getcwd()), default=os.path.abspath(os.getcwd()), metavar="<workdir>") parser.add_argument("-N", "--jobname", type=str, help="job name", metavar="<jobname>") parser.add_argument("-lg", "--logdir", type=str, help='the output log dir, default: "runjob_*_log_dir"', 
metavar="<logdir>") parser.add_argument("-o", "--outdir", type=str, help='the oss output directory if your mode is "batchcompute", all output file will be mapping to you OSS://BUCKET-NAME. if not set, any output will be reserved', metavar="<dir>") parser.add_argument("-n", "--num", type=int, help="the max job number runing at the same time. default: all in your job file", metavar="<int>") parser.add_argument("-s", "--startline", type=int, help="which line number(0-base) be used for the first job tesk. default: 0", metavar="<int>", default=0) parser.add_argument("-e", "--endline", type=int, help="which line number (include) be used for the last job tesk. default: all in your job file", metavar="<int>") parser.add_argument('-d', '--debug', action='store_true', help='log debug info', default=False) parser.add_argument("-l", "--log", type=str, help='append log info to file, sys.stdout by default', metavar="<file>")
if mode == 'add': array[ind] = self.ndarray[ind] + function[0]['f'+key](functimearray) elif mode == 'sub': array[ind] = self.ndarray[ind] - function[0]['f'+key](functimearray) elif mode == 'values': array[ind] = function[0]['f'+key](functimearray) elif mode == 'div': array[ind] = self.ndarray[ind] / function[0]['f'+key](functimearray) elif mode == 'multiply': array[ind] = self.ndarray[ind] * function[0]['f'+key](functimearray) else: print("func2stream: mode not recognized") return DataStream(self,self.header,np.asarray(array,dtype=object)) for elem in self: # check whether time step is in function range if function[1] <= elem.time <= function[2]: functime = (elem.time-function[1])/(function[2]-function[1]) for key in keys: if not key in KEYLIST[1:16]: raise ValueError("Column key not valid") fkey = 'f'+key exec('keyval = elem.'+key) if fkey in function[0] and not isnan(keyval): try: newval = keyval + function[0][fkey](functime) except: newval = float('nan') exec('elem.'+key+' = newval') else: pass else: pass return self def func_subtract(self,funclist,**kwargs): """ Subtract a function from the selected values of the data stream -> e.g. obtain Residuals Optional: keys (default = '<KEY>') :type order int :param order : 0 -> stream - function; 1 -> function - stream """ keys = kwargs.get('keys') order = kwargs.get('order') st = DataStream() st = self.copy() if isinstance(funclist[0], dict): funct = [funclist] else: funct = funclist function = funct[0] # Direct call of old version only accepts single function """ for el in self: li = LineStruct() li.time = el.time li.x = el.x li.y = el.y li.z = el.z st.add(li) """ if not order: order = 0 if not keys: keys = ['<KEY>'] for elem in st: # check whether time step is in function range if function[1] <= elem.time <= function[2]: functime = (elem.time-function[1])/(function[2]-function[1]) for key in keys: if not key in KEYLIST[1:16]: raise ValueError("Column key not valid") fkey = 'f'+key exec('keyval = elem.'+key) if fkey in function[0] and not isnan(keyval): try: if order == 0: newval = keyval - function[0][fkey](functime) else: newval = function[0][fkey](functime) - keyval except: newval = float('nan') exec('elem.'+key+' = newval') else: pass else: pass return st def func2header(self,funclist,debug=False): """ DESCRIPTION Add a list of functions into the data header """ if isinstance(funclist[0], dict): funct = [funclist] else: funct = funclist self.header['DataFunctionObject'] = funct return self def GetKeyName(self,key): """ DESCRIPTION get the content name of a specific key will scan header information until successful: (1) col-"key" names (2) ColumnContent header info (3) SensorElements header info if no Name for the key is found, then the key itself is returned APPLICATION: element = datastream.GetKeyName('var1') """ if not key in KEYLIST: print ("key not in KEYLIST - aborting") return '' element = '' # One try: element = self.header.get("col-{}".format(key)) if not element == '': return element except: pass # Two try: element = self.header.get('ColumnContents','').split(',')[KEYLIST.index(key)] if not element == '': return element except: pass # Three try: idx = self.header.get('SensorKeys','').split(',').index(key) element = self.header.get('SensorElements','').split(',')[idx] if not element == '': return element except: pass return key def GetKeyUnit(self,key): """ DESCRIPTION get the content name of a specific key will scan header information until successful: (1) unit-col-"key" names (2) ColumnUnit header info if no unit for the key is 
found, then an empty string is returned APPLICATION: unit = datastream.GetKeyUnit('var1') """ if not key in KEYLIST: print ("key not in KEYLIST - aborting") return '' unit = '' # One try: unit = self.header.get("unit-col-{}".format(key)) if not unit == '': return unit except: pass # Two try: unit = self.header.get('ColumnUnits','').split(',')[KEYLIST.index(key)] if not unit == '': return unit except: pass return unit def get_gaps(self, **kwargs): """ DEFINITION: Takes the dominant sample frequency and fills nan into non-existing time steps: This function provides the basis for discontinuous plots and gap analysis and proper filtering. PARAMETERS: Variables: --- Kwargs: - accuracy: (float) time relative to a day - default 1 sec - gapvariable: (string) - refering to stream column - default='var5' - This column is overwritten with 0 (data) and 1 (no data). - key: (string) - refering to a data column e.g. key='x'. If given then all NaN values with existing time steps are also marked by '1' in the gapvariable line for this key RETURNS: - stream: (Datastream) EXAMPLE: >>> stream_with_gaps_filled = stream_with_aps.get_gaps(['f']) APPLICATION: used by nfilter() for correct filtering CHANGES: Last updated and tested with nfilter function by leon 2014-07-22 """ accuracy = kwargs.get('accuracy') key = kwargs.get('key') gapvariable = kwargs.get('gapvariable') debug = kwargs.get('debug') if key in KEYLIST: gapvariable = True if not gapvariable: gapvariable = 'var5' if not self.length()[0] > 1: print ("get_gaps: Stream does not contain data - aborting") return self # Better use get_sampling period as samplingrate is rounded #spr = self.get_sampling_period() #newsps = newsp*3600.0*24.0 newsps = self.samplingrate() newsp = newsps/3600.0/24.0 if not accuracy: accuracy = 0.9/(3600.0*24.0) # one second relative to day accuracy = 0.05*newsp # 5 percent of samplingrate if newsps < 0.9 and not accuracy: accuracy = (newsps-(newsps*0.1))/(3600.0*24.0) logger.info('--- Starting filling gaps with NANs at %s ' % (str(datetime.now()))) stream = self.copy() prevtime = 0 ndtype = False if len(stream.ndarray[0]) > 0: maxtime = stream.ndarray[0][-1] mintime = stream.ndarray[0][0] length = len(stream.ndarray[0]) sourcetime = stream.ndarray[0] ndtype = True else: mintime = self[0].time maxtime = self[-1].time if debug: print("Time range:", mintime, maxtime) print("Length, samp_per and accuracy:", self.length()[0], newsps, accuracy) shift = 0 if ndtype: # Get time diff and expected count timediff = maxtime - mintime expN = int(round(timediff/newsp))+1 if debug: print("Expected length vs actual length:", expN, length) if expN == len(sourcetime): # Found the expected amount of time steps - no gaps logger.info("get_gaps: No gaps found - Returning") return stream else: # correct way (will be used by default) - does not use any accuracy value #projtime = np.linspace(mintime, maxtime, num=expN, endpoint=True) #print("proj:", projtime, len(projtime)) # find values or projtime, which are not in sourcetime #dif = setdiff1d(projtime,sourcetime, assume_unique=True) #print (dif, len(dif)) #print (len(dif),len(sourcetime),len(projtime)) diff = sourcetime[1:] - sourcetime[:-1] num_fills = np.round(diff / newsp) - 1 getdiffids = np.where(diff > newsp+accuracy)[0] logger.info("get_gaps: Found gaps - Filling nans to them") if debug: print ("Here", diff, num_fills, newsp, getdiffids) missingt = [] # Get critical differences and number of missing steps for i in getdiffids: #print (i, sourcetime[i-1], sourcetime[i], sourcetime[i+1]) nf = 
num_fills[i] # if nf is larger than zero then get append the missing time steps to missingt list if nf > 0: for n in range(int(nf)): # add n+1 * samplingrate for each missing value missingt.append(sourcetime[i]+(n+1)*newsp) print ("Filling {} gaps".format(len(missingt))) # Cycle through stream and append nans to each column for missing time steps nans = [np.nan] * len(missingt) empts = [''] * len(missingt) gaps = [0.0] * len(missingt) for idx,elem in enumerate(stream.ndarray): if idx == 0: # append missingt list to array element elem = list(elem) lenelem = len(elem) elem.extend(missingt) stream.ndarray[idx] = np.asarray(elem).astype(object) elif len(elem) > 0: # append nans list to array element elem = list(elem) if KEYLIST[idx] in NUMKEYLIST or KEYLIST[idx] == 'sectime': elem.extend(nans) else: elem.extend(empts) stream.ndarray[idx] = np.asarray(elem).astype(object) elif KEYLIST[idx] == gapvariable: # append nans list to array element elem = [1.0]*lenelem elem.extend(gaps) stream.ndarray[idx] = np.asarray(elem).astype(object) return stream.sorting() else: stream = DataStream() for elem in self: if abs((prevtime+newsp) - elem.time) > accuracy and not prevtime == 0: currtime = num2date(prevtime)+timedelta(seconds=newsps) while currtime <= num2date(elem.time): newline = LineStruct() exec('newline.'+gapvariable+' = 1.0') newline.time = date2num(currtime) stream.add(newline) currtime += timedelta(seconds=newsps) else: exec('elem.'+gapvariable+' = 0.0') if key in KEYLIST: if isnan(eval('elem.'+key)): exec('elem.'+gapvariable+' = 1.0') stream.add(elem) prevtime = elem.time logger.info('--- Filling gaps finished at %s ' % (str(datetime.now()))) if debugmode: print("Ending:", stream[0].time, stream[-1].time) return stream.sorting() def get_rotationangle(self, xcompensation=0,keys=['x','y','z'],**kwargs): """ DESCRIPTION: "Estimating" the rotation angle towards a magnetic coordinate system assuming z to be vertical down. Please note: You need to provide a complete horizontal vector including either the x compensation field or if not available an annual estimate of the vector. This method can be used to determine reorientation characteristics in order to accurately apply HDZ optimzed basevalue calculations. RETURNS: rotangle (float) The estimated rotation angle in degree """ annualmeans = kwargs.get('annualmeans') #1. get vector from data # x
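The gap handling in get_gaps() above reduces to comparing consecutive time differences against the nominal sampling period plus a tolerance and generating the missing steps. Below is a minimal, self-contained sketch of that detection logic, assuming a plain, sorted NumPy time array in days rather than a DataStream; find_missing_times is a hypothetical helper name, not part of the library.

import numpy as np

def find_missing_times(sourcetime, newsp, accuracy=None):
    # accuracy defaults to 5 percent of the sampling period, as in get_gaps()
    if accuracy is None:
        accuracy = 0.05 * newsp
    diff = sourcetime[1:] - sourcetime[:-1]
    num_fills = np.round(diff / newsp) - 1          # missing steps per gap
    gap_ids = np.where(diff > newsp + accuracy)[0]  # gaps larger than one step
    missingt = []
    for i in gap_ids:
        for n in range(int(num_fills[i])):
            # add (n+1) * sampling period for each missing value
            missingt.append(sourcetime[i] + (n + 1) * newsp)
    return missingt

# one-minute data (times in days) with two samples missing after t = 2 min
t = np.array([0.0, 1.0, 2.0, 5.0, 6.0]) / (24.0 * 60.0)
print(find_missing_times(t, newsp=1.0 / (24.0 * 60.0)))  # -> two missing time steps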
= np.array(state_considered)[-1 - delay] if list(new_relevant_state) == self.target_point: reward += 1.0 reward *= self.reward_scale noise_in_reward = self.reward_noise(self.np_random) if self.reward_noise else 0 # #random ###TODO Would be better to parameterise this in terms of state, action and time_step as well. Would need to change implementation to have a queue for the rewards achieved and then pick the reward that was generated delay timesteps ago. self.total_abs_noise_in_reward_episode += np.abs(noise_in_reward) self.total_reward_episode += reward reward += noise_in_reward reward += self.reward_shift return reward def step(self, action, imaginary_rollout=False): """The step function for the environment. Parameters ---------- action : int or np.array The action that the environment will use to perform a transition. imaginary_rollout: boolean Option for the user to perform "imaginary" transitions, e.g., for model-based RL. If set to true, underlying augmented state of the MDP is not changed and user is responsible to maintain and provide a list of states to this function to be able to perform a rollout. Returns ------- int or np.array, double, boolean, dict The next state, reward, whether the episode terminated and additional info dict at the end of the current transition """ # For imaginary transitions, discussion: # 1) Use external observation_space as argument to P() and R(). But then it's not possible for P and R to know underlying MDP state unless we pass it as another argument. This is not desirable as we want P and R to simply be functions of external state/observation and action. 2) The other possibility is how it's currently done: P and R _know_ the underlying state. But in this case, we need an extra imaginary_rollout argument to P and R and we can't perform imaginary rollouts longer than one step without asking the user to maintain a sequence of underlying states and actions to be passed as arguments to P and R. # P and R knowing the underlying state seems a poor design choice to me # because it makes the code structure more brittle, so I propose that # step() handles the underlying state vs external observation conversion # and user can use P and R with underlying state. And step should handle # the case of imaginary rollouts by building a tree of transitions and # allowing rollback to states along the tree. However, user will probably # want access to P and R by using only observations as well instead of the # underlying state. In this case, P and R need to be aware of underlying # state and be able to store imaginary rollouts if needed. # Transform multi-discrete to discrete for discrete state spaces with # irrelevant dimensions; needed only for imaginary rollouts, otherwise, # internal augmented state is used. if imaginary_rollout: print("Imaginary rollouts are currently not supported.") sys.exit(1) if self.config["state_space_type"] == "discrete": if self.irrelevant_features: state, action, state_irrelevant, action_irrelevant = ( self.curr_state[0], action[0], self.curr_state[1], action[1], ) else: state, action = self.curr_state, action else: # cont. or grid case state, action = self.curr_state, action # ### TODO Decide whether to give reward before or after transition ("after" would mean taking next state into account and seems more logical to me) - make it a dimension? - R(s) or R(s, a) or R(s, a, s')? I'd say give it after and store the old state in the augmented_state to be able to let the R have any of the above possible forms. 
That would also solve the problem of implicit 1-step delay with giving it before. _And_ would not give any reward for already being in a rewarding state in the 1st step but _would_ give a reward if 1 moved to a rewardable state - even if called with R(s, a) because s' is stored in the augmented_state! #####IMP # ###TODO P uses last state while R uses augmented state; for cont. env, P does know underlying state_derivatives - we don't want this to be the case for the imaginary rollout scenario; next_state = self.P(state, action) # if imaginary_rollout: # pass # # print("imaginary_rollout") # Since transition_function currently depends only on current state and action, we don't need to do anything here! # else: del self.augmented_state[0] if self.config["state_space_type"] == "discrete": self.augmented_state.append(next_state) elif self.config["state_space_type"] == "continuous": self.augmented_state.append(next_state.copy()) elif self.config["state_space_type"] == "grid": self.augmented_state.append([next_state[i] for i in range(2)]) self.total_transitions_episode += 1 self.reward = self.R(self.augmented_state, action) # #irrelevant dimensions part if self.config["state_space_type"] == "discrete": if self.irrelevant_features: next_state_irrelevant = self.config["transition_function_irrelevant"][ state_irrelevant, action_irrelevant ] if self.transition_noise: probs = ( np.ones(shape=(self.state_space_size[1],)) * self.transition_noise / (self.state_space_size[1] - 1) ) probs[next_state_irrelevant] = 1 - self.transition_noise new_next_state_irrelevant = self.observation_spaces[1].sample( prob=probs ) # #random # if next_state_irrelevant != new_next_state_irrelevant: # print("NOISE inserted! old next_state_irrelevant, new_next_state_irrelevant", next_state_irrelevant, new_next_state_irrelevant) # self.total_noisy_transitions_irrelevant_episode += 1 next_state_irrelevant = new_next_state_irrelevant # Transform discrete back to multi-discrete if needed if self.config["state_space_type"] == "discrete": if self.irrelevant_features: next_obs = next_state = (next_state, next_state_irrelevant) else: next_obs = next_state else: # cont. or grid space next_obs = next_state if self.image_representations: next_obs = self.observation_space.get_concatenated_image(next_state) self.curr_state = next_state self.curr_obs = next_obs # #### TODO curr_state is external state, while we need to check relevant state for terminality! Done - by using augmented_state now instead of curr_state! self.done = (self.is_terminal_state(self.augmented_state[-1]) or self.reached_terminal) if self.done: self.reward += ( self.term_state_reward * self.reward_scale ) # Scale before or after? self.logger.info( "sas'r: " + str(self.augmented_state[-2]) + " " + str(action) + " " + str(self.augmented_state[-1]) + " " + str(self.reward) ) return self.curr_obs, self.reward, self.done, self.get_augmented_state() def get_augmented_state(self): """Intended to return the full augmented state which would be Markovian. (However, it's not Markovian wrt the noise in P and R because we're not returning the underlying RNG.) Currently, returns the augmented state which is the sequence of length "delay + sequence_length + 1" of past states for both discrete and continuous environments. Additonally, the current state derivatives are also returned for continuous environments. Returns ------- dict Contains at the end of the current transition """ # #TODO For noisy processes, this would need the noise distribution and random seed too. 
Also add the irrelevant state parts, etc.? We don't need the irrelevant parts for the state to be Markovian. if self.config["state_space_type"] == "discrete": augmented_state_dict = { "curr_state": self.curr_state, "curr_obs": self.curr_obs, "augmented_state": self.augmented_state, } elif self.config["state_space_type"] == "continuous": augmented_state_dict = { "curr_state": self.curr_state, "curr_obs": self.curr_obs, "augmented_state": self.augmented_state, "state_derivatives": self.state_derivatives, } elif self.config["state_space_type"] == "grid": augmented_state_dict = { "curr_state": self.curr_state, "curr_obs": self.curr_obs, "augmented_state": self.augmented_state, } return augmented_state_dict def reset(self): """Resets the environment for the beginning of an episode and samples a start state from rho_0. For discrete environments uses the defined rho_0 directly. For continuous environments, samples a state and resamples until a non-terminal state is sampled. Returns ------- int or np.array The start state for a new episode. """ # on episode "end" stuff (to not be invoked when reset() called when # self.total_episodes = 0; end is in quotes because it may not be a true # episode end reached by reaching a terminal state, but reset() may have # been called in the middle of an episode): if not self.total_episodes == 0: self.logger.info( "Noise stats for previous episode num.: " + str(self.total_episodes) + " (total abs. noise in rewards, total abs." " noise in transitions, total reward, total noisy transitions, total" " transitions): " + str(self.total_abs_noise_in_reward_episode) + " " + str(self.total_abs_noise_in_transition_episode) + " " + str(self.total_reward_episode) + " " + str(self.total_noisy_transitions_episode) + " " + str(self.total_transitions_episode) ) # on episode start stuff: self.reward_buffer = [0.0] * (self.delay) self.total_episodes += 1 if self.config["state_space_type"] == "discrete": self.curr_state_relevant = self.np_random.choice( self.state_space_size[0], p=self.config["relevant_init_state_dist"] ) # #random self.curr_state = self.curr_state_relevant # # curr_state set here # already in case if statement below is not entered if self.irrelevant_features: self.curr_state_irrelevant = self.np_random.choice( self.state_space_size[1], p=self.config["irrelevant_init_state_dist"], ) # #random self.curr_state = (self.curr_state_relevant, self.curr_state_irrelevant) self.logger.info( "RESET called. Relevant part of state reset to:" + str(self.curr_state_relevant) ) self.logger.info( "Irrelevant part of state reset to:" +
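The reward bookkeeping described above (delay, scaling, additive noise and shift) can be illustrated with a small standalone wrapper. This is only a sketch of the delayed-reward queue idea mentioned in the TODO comments, not the MDP Playground implementation (which computes R from the augmented state); DelayedRewardWrapper and its arguments are illustrative names, and env is assumed to expose an old-style Gym step() returning (obs, reward, done, info).

import collections
import numpy as np

class DelayedRewardWrapper:
    def __init__(self, env, delay=2, reward_scale=1.0, reward_shift=0.0,
                 reward_noise_std=0.0, seed=0):
        self.env = env
        self.delay = delay
        self.reward_scale = reward_scale
        self.reward_shift = reward_shift
        self.reward_noise_std = reward_noise_std
        self.rng = np.random.default_rng(seed)
        # rewards earned but not yet handed out, oldest first
        self.reward_buffer = collections.deque([0.0] * delay, maxlen=delay + 1)

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        # queue the freshly earned reward and release the one from `delay`
        # steps ago, then apply scale, noise and shift in that order
        self.reward_buffer.append(reward)
        delayed = self.reward_buffer.popleft()
        delayed *= self.reward_scale
        if self.reward_noise_std:
            delayed += self.rng.normal(0.0, self.reward_noise_std)
        delayed += self.reward_shift
        return obs, delayed, done, info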
<reponame>Joshuaalbert/bayes_tec<gh_stars>0 import matplotlib matplotlib.use('Agg') import numpy as np import os from concurrent import futures from ..datapack import DataPack from ..frames import UVW from ..logging import logging import astropy.coordinates as ac import astropy.time as at import astropy.units as au from scipy.spatial import ConvexHull, cKDTree import time from scipy.spatial.distance import pdist import psutil import pylab as plt plt.style.use('ggplot') from matplotlib.patches import Polygon, Rectangle from matplotlib.collections import PatchCollection import matplotlib.colors as colors try: import cmocean phase_cmap = cmocean.cm.phase except ImportError: phase_cmap = plt.cm.hsv class DatapackPlotter(object): def __init__(self,datapack): if isinstance(datapack,str): datapack = DataPack(filename=datapack,readonly=True) self.datapack = datapack def _create_polygon_plot(self,points, values=None, N = None,ax=None,cmap=plt.cm.bone,overlay_points=None,annotations=None,title=None,polygon_labels=None,reverse_x=False): # get nearest points (without odd voronoi extra regions) k = cKDTree(points) dx = np.max(points[:,0]) - np.min(points[:,0]) dy = np.max(points[:,1]) - np.min(points[:,1]) delta = pdist(points) N = N or int(min(max(100,2*np.max(delta)/np.min(delta)),500)) x = np.linspace(np.min(points[:,0])-0.1*dx,np.max(points[:,0])+0.1*dx,N) y = np.linspace(np.min(points[:,1])-0.1*dy,np.max(points[:,1])+0.1*dy,N) X,Y = np.meshgrid(x,y,indexing='ij') # interior points population points_i = np.array([X.flatten(),Y.flatten()]).T # The match per input point dist,i = k.query(points_i,k=1) # the polygons are now created using convex hulls # order is by point order patches = [] for group in range(points.shape[0]): points_g = points_i[i==group,:] if points_g.size == 0: logging.debug("Facet {} has zero size".format(group)) poly = Polygon(points[group:group+1,:],closed=False) else: hull = ConvexHull(points_g) nodes = points_g[hull.vertices,:] poly = Polygon(nodes,closed=False) patches.append(poly) if ax is None: fig,ax = plt.subplots() logging.info("Making new plot") if values is None: values = np.zeros(len(patches))#random.uniform(size=len(patches)) p = PatchCollection(patches,cmap=cmap) p.set_array(values) ax.add_collection(p) #plt.colorbar(p) if overlay_points is not None: if annotations is None: ax.scatter(overlay_points[:,0],overlay_points[:,1],marker='+',c='black') else: for point, a in zip(overlay_points, annotations): ax.text(point[0],point[1],a,ha='center',va='center',backgroundcolor=(1.,1.,1., 0.1)) if reverse_x: ax.set_xlim([np.max(points_i[:,0]),np.min(points_i[:,0])]) else: ax.set_xlim([np.min(points_i[:,0]),np.max(points_i[:,0])]) ax.set_ylim([np.min(points_i[:,1]),np.max(points_i[:,1])]) ax.set_facecolor('black') ax.grid(b=True,color='black') if title is not None: if reverse_x: ax.text(np.max(points_i[:,0])-0.05*dx,np.max(points_i[:,1])-0.05*dy,title,ha='left',va='top',backgroundcolor=(1.,1.,1., 0.5)) else: ax.text(np.min(points_i[:,0])+0.05*dx,np.max(points_i[:,1])-0.05*dy,title,ha='left',va='top',backgroundcolor=(1.,1.,1., 0.5)) # Rectangle((x, y), 0.5, 0.5, # alpha=0.1,facecolor='red',label='Label')) # ax.annotate(title,xy=(0.8,0.8),xycoords='axes fraction') return ax, p def _create_image_plot(self,points, values=None, N = None,ax=None,cmap=plt.cm.bone,overlay_points=None,annotations=None,title=None,reverse_x=False): ''' Create initial plot, with image data instead of polygons. 
points: (ra, dec) values: array [n, m] or None, assumes (dec, ra) ordering ie (y,x) ''' dx = np.max(points[0]) - np.min(points[0]) dy = np.max(points[1]) - np.min(points[1]) if values is not None: Ndec,Nra = values.shape else: Ndec,Nra = len(points[1]),len(points[0]) values = np.zeros([Ndec,Nra]) if ax is None: fig,ax = plt.subplots() logging.info("Making new plot") x = np.linspace(np.min(points[0]),np.max(points[0]),Nra) y = np.linspace(np.min(points[1]),np.max(points[1]),Ndec) img = ax.imshow(values,origin='lower',cmap=cmap, aspect='auto', extent=(x[0],x[-1],y[0],y[-1])) if overlay_points is not None: if annotations is None: ax.scatter(overlay_points[:,0],overlay_points[:,1],marker='+',c='black') else: for point, a in zip(overlay_points, annotations): ax.text(point[0],point[1],a,ha='center',va='center',backgroundcolor=(1.,1.,1., 0.1)) if reverse_x: ax.set_xlim([x[-1],x[0]]) else: ax.set_xlim([x[0],x[-1]]) ax.set_ylim([y[0],y[-1]]) ax.set_facecolor('black') ax.grid(b=True,color='black') if title is not None: if reverse_x: ax.text(x[-1]-0.05*dx,y[-1]-0.05*dy,title,ha='left',va='top',backgroundcolor=(1.,1.,1., 0.5)) else: ax.text(x[0]+0.05*dx,y[-1]-0.05*dy,title,ha='left',va='top',backgroundcolor=(1.,1.,1., 0.5)) return ax, img def plot(self, ant_sel=None,time_sel=None,freq_sel=None,dir_sel=None,pol_sel=None, fignames=None, vmin=None,vmax=None,mode='perantenna',observable='phase',phase_wrap=True, log_scale=False, plot_crosses=True,plot_facet_idx=False,plot_patchnames=False,labels_in_radec=False,show=False, plot_arrays=False, solset=None, plot_screen=False, tec_eval_freq=None, **kwargs): """ Plot datapack with given parameters. """ SUPPORTED = ['perantenna'] assert mode in SUPPORTED, "only 'perantenna' supported currently".format(SUPPORTED) if fignames is None: save_fig = False show = True else: save_fig = True show = show and True #False if plot_patchnames: plot_facet_idx = False if plot_patchnames or plot_facet_idx: plot_crosses = False if not show: logging.debug('turning off display') matplotlib.use('Agg') ### # Set up plotting with self.datapack: self.datapack.switch_solset(solset) logging.info("Applying selection: ant={},time={},freq={},dir={},pol={}".format(ant_sel,time_sel,freq_sel,dir_sel,pol_sel)) self.datapack.select(ant=ant_sel,time=time_sel,freq=freq_sel,dir=dir_sel,pol=pol_sel) obs,axes = self.datapack.__getattr__(observable) if observable.startswith('weights_'): obs = np.sqrt(np.abs(1./obs)) #uncert from weights = 1/var phase_wrap=False if 'pol' in axes.keys(): # plot only first pol selected obs = obs[0,...] 
#obs is dir, ant, freq, time antenna_labels, antennas = self.datapack.get_antennas(axes['ant']) patch_names, directions = self.datapack.get_sources(axes['dir']) timestamps, times = self.datapack.get_times(axes['time']) freq_dep = True try: freq_labels, freqs = self.datapack.get_freqs(axes['freq']) except: freq_dep = False obs = obs[:,:,None,:] freq_labels, freqs = [""],[None] if tec_eval_freq is not None: obs = obs*-8.4480e9/tec_eval_freq if phase_wrap: obs = np.angle(np.exp(1j*obs)) vmin = -np.pi vmax = np.pi cmap = phase_cmap else: vmin = vmin or np.percentile(obs.flatten(),1) vmax = vmax or np.percentile(obs.flatten(),99) cmap = plt.cm.bone if log_scale: obs = np.log10(obs) Na = len(antennas) Nt = len(times) Nd = len(directions) Nf = len(freqs) fixfreq = Nf >> 1 logging.info("Plotting {} directions".format(Nd)) logging.info("Plotting {} antennas".format(Na)) logging.info("Plotting {} timestamps".format(Nt)) _, antennas_ = self.datapack.get_antennas([self.datapack.ref_ant]) #ants_uvw = antennas.transform_to(uvw) ref_dist = np.sqrt((antennas.x - antennas_.x)**2 + (antennas.y - antennas_.y)**2 + (antennas.z - antennas_.z)**2).to(au.km).value # if labels_in_radec: ra = directions.ra.deg dec = directions.dec.deg if not plot_screen: ### points are normal points = np.array([ra,dec]).T if plot_crosses or plot_patchnames or plot_facet_idx: overlay_points = points else: overlay_points = None else: ### get unique ra and dec and then rearrange into correct order. _ra = np.unique(ra) _dec = np.unique(dec) Nra = len(_ra) Ndec = len(_dec) assert Ndec * Nra == Nd ### sort lexiconially ind = np.lexsort((ra,dec)) points = (_ra, _dec) obs = obs[ind, ...] obs = obs.reshape((Ndec,Nra,Na,Nf,Nt)) if plot_crosses: overlay_points = None # put the facet (ra,dec).T else: overlay_points = None if plot_patchnames: annotations = patch_names elif plot_facet_idx: annotations = np.array([str(k) for k in range(Nd)]) else: annotations = None if fignames is not None: if not isinstance(fignames,(tuple,list)): fignames = [fignames] if fignames is not None: assert Nt == len(fignames) if mode == 'perantenna': M = int(np.ceil(np.sqrt(Na))) fig,axs = plt.subplots(nrows=M,ncols=M,sharex='col',sharey='row',squeeze=False, \ figsize=(4*M,4*M)) fig.subplots_adjust(wspace=0., hspace=0.) 
axes_patches = [] c = 0 for row in range(M): for col in range(M): ax = axs[row,col] if col == 0: ax.set_ylabel("Projected North (radians)" if not labels_in_radec else "DEC (deg)") if row == M - 1: ax.set_xlabel("Projected East (radians)" if not labels_in_radec else "RA (deg)") if c >= Na: continue try: title = antenna_labels[c].decode() except: title = antenna_labels[c] if plot_screen: _, p = self._create_image_plot(points, values=None, N = None, ax=ax,cmap=cmap,overlay_points=overlay_points, annotations=annotations, title="{} {:.1f}km".format(title, ref_dist[c]), reverse_x=labels_in_radec) else: _, p = self._create_polygon_plot(points, values=None, N = None, ax=ax,cmap=cmap,overlay_points=overlay_points, annotations=annotations, title="{} {:.1f}km".format(title, ref_dist[c]), reverse_x=labels_in_radec) p.set_clim(vmin,vmax) axes_patches.append(p) c += 1 fig.subplots_adjust(right=0.85) cbar_ax = fig.add_axes([0.875, 0.15, 0.025, 0.7]) fig.colorbar(p, cax=cbar_ax, orientation='vertical') if show: plt.ion() plt.show() for j in range(Nt): logging.info("Plotting {}".format(timestamps[j])) for i in range(Na): if not plot_screen: axes_patches[i].set_array(obs[:,i,fixfreq,j]) else: axes_patches[i].set_array(obs[:,:,i,fixfreq,j]) axs[0,0].set_title("{} {} : {}".format(observable, freq_labels[fixfreq], timestamps[j])) fig.canvas.draw() if save_fig: plt.savefig(fignames[j]) if show: # plt.close(fig) plt.ioff() def _parallel_plot(arg): datapack,time_slice,kwargs,output_folder=arg dp = DatapackPlotter(datapack=datapack) with dp.datapack: # Get the time selection desired dp.datapack.select(time=kwargs.get('time_sel',None)) axes = dp.datapack.axes_phase # timeslice the selection times = axes['time']#mjs sel_list = times[time_slice] kwargs['time_sel'] = sel_list fignames = [os.path.join(output_folder,"fig-{:04d}.png".format(j)) for j in range(len(times))[time_slice]] dp.plot(fignames=fignames,**kwargs) return fignames def animate_datapack(datapack,output_folder,num_processes,**kwargs): """ Plot the datapack in parallel, then stitch into movie. datapack: str the datapack filename output_folder: str, folder to store figs in num_processes: int number of parallel plotting processes to run **kwargs: keywords to pass to DatapackPlotter.plot function. """ try: os.makedirs(output_folder) except: pass if num_processes is None: num_processes = psutil.cpu_count() if isinstance(datapack,DataPack): datapack = datapack.filename # with DataPack(datapack) as datapack_fix: # datapack_fix.add_antennas(DataPack.lofar_array) args = [] for i in range(num_processes): args.append((datapack,slice(i,None,num_processes),kwargs,output_folder)) with futures.ProcessPoolExecutor(max_workers=num_processes) as executor: jobs = executor.map(_parallel_plot,args) results = list(jobs) plt.close('all') make_animation(output_folder,prefix='fig',fps=4) def make_animation(datafolder,prefix='fig',fps=4): '''Given a datafolder with figures of format `prefix`-%04d.png create a video at framerate `fps`. 
Output is datafolder/animation.mp4''' if os.system('ffmpeg -framerate {} -i {}/{}-%04d.png -vf scale="trunc(iw/2)*2:trunc(ih/2)*2" -c:v libx264 -profile:v high -pix_fmt yuv420p -g 30 -r 30 {}/animation.mp4'.format(fps,datafolder,prefix,datafolder)): logging.info("{}/animation.mp4 exists already".format(datafolder)) def plot_phase_vs_time(datapack,output_folder, solsets='sol000', ant_sel=None,time_sel=None,dir_sel=None,freq_sel=None,pol_sel=None): if isinstance(datapack,DataPack): datapack = datapack.filename if not isinstance(solsets , (list,tuple)): solsets = [solsets] output_folder = os.path.abspath(output_folder) os.makedirs(output_folder,exist_ok=True) with DataPack(datapack,readonly=True) as datapack: phases = [] stds = [] for solset in solsets: datapack.switch_solset(solset) datapack.select(ant=ant_sel,time=time_sel,dir=dir_sel,freq=freq_sel,pol=pol_sel) weights,axes = datapack.weights_phase freq_ind = len(axes['freq']) >> 1 freq = axes['freq'][freq_ind] ant = axes['ant'][0] phase,_ = datapack.phase std = np.sqrt(np.abs(weights)) timestamps,times = datapack.get_times(axes['time']) phases.append(phase) stds.append(std) for phase in phases: for s,S in zip(phase.shape,phases[0].shape): assert s==S Npol,Nd,Na,Nf,Nt = phases[0].shape fig,ax = plt.subplots() for p in range(Npol): for d in range(Nd): for a in range(Na): for f in range(Nf): ax.cla() for i,solset in enumerate(solsets): phase = phases[i] std = stds[i] label = "{} {} {:.1f}MHz {}:{}".format(solset, axes['pol'][p], axes['freq'][f]/1e6, axes['ant'][a], axes['dir'][d]) ax.fill_between(times.mjd,phase[p,d,a,f,:]-2*std[p,d,a,f,:],phase[p,d,a,f,:]+2*std[p,d,a,f,:],alpha=0.5,label=r'$\pm2\hat{\sigma}_\phi$')#,color='blue') ax.scatter(times.mjd,phase[p,d,a,f,:],marker='+',alpha=0.3,color='black',label=label) ax.set_xlabel('Time [mjd]') ax.set_ylabel('Phase deviation [rad.]') ax.legend() filename = "{}_{}_{}_{}MHz.png".format(axes['ant'][a], axes['dir'][d], axes['pol'][p], axes['freq'][f]/1e6 ) plt.savefig(os.path.join(output_folder,filename)) plt.close('all') def plot_data_vs_solution(datapack,output_folder, data_solset='sol000', solution_solset='posterior_sol', show_prior_uncert=False, ant_sel=None,time_sel=None,dir_sel=None,freq_sel=None,pol_sel=None): def _wrap(phi): return np.angle(np.exp(1j*phi)) if isinstance(datapack,DataPack): datapack = datapack.filename output_folder = os.path.abspath(output_folder) os.makedirs(output_folder,exist_ok=True) solsets = [data_solset, solution_solset] with DataPack(datapack,readonly=True) as datapack: phases = [] stds = [] datapack.switch_solset(data_solset) datapack.select(ant=ant_sel,time=time_sel,dir=dir_sel,freq=freq_sel,pol=pol_sel) weights,axes = datapack.weights_phase _,freqs = datapack.get_freqs(axes['freq']) phase,_ = datapack.phase std = np.sqrt(np.abs(1./weights)) timestamps,times = datapack.get_times(axes['time']) phases.append(_wrap(phase)) stds.append(std) tec_conversion = -8.4480e9/freqs[None,None,None,:,None] datapack.switch_solset(solution_solset) datapack.select(ant=ant_sel,time=time_sel,dir=dir_sel,freq=freq_sel,pol=pol_sel) weights,_ = datapack.weights_tec tec,_ = datapack.tec std = np.sqrt(np.abs(1./weights))[:,:,:,None,:]*np.abs(tec_conversion) phases.append(_wrap(tec[:,:,:,None,:]*tec_conversion)) stds.append(std) for phase in phases: for s,S in zip(phase.shape,phases[0].shape): assert s==S Npol,Nd,Na,Nf,Nt = phases[0].shape fig,ax = plt.subplots()
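The facet polygons built in _create_polygon_plot come from a nearest-neighbour assignment of a dense grid to the facet centres, followed by a convex hull per group. A condensed, self-contained sketch of that construction (facet_patches is an illustrative helper name; the degenerate zero-size facet case handled in the original is omitted here):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from scipy.spatial import cKDTree, ConvexHull

def facet_patches(points, N=100):
    # sample a regular grid covering the facet centres
    x = np.linspace(points[:, 0].min(), points[:, 0].max(), N)
    y = np.linspace(points[:, 1].min(), points[:, 1].max(), N)
    X, Y = np.meshgrid(x, y, indexing='ij')
    grid = np.stack([X.ravel(), Y.ravel()], axis=1)
    # assign every grid point to its nearest facet centre
    _, owner = cKDTree(points).query(grid, k=1)
    patches = []
    for g in range(points.shape[0]):
        members = grid[owner == g]
        hull = ConvexHull(members)
        patches.append(Polygon(members[hull.vertices], closed=True))
    return patches

centres = np.random.default_rng(0).uniform(size=(10, 2))
p = PatchCollection(facet_patches(centres), cmap=plt.cm.bone)
p.set_array(np.arange(10.0))       # one value per facet, as in the plotter
fig, ax = plt.subplots()
ax.add_collection(p)
ax.autoscale()
fig.savefig('facets.png')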
metadata. :param filepath: savepath of the OME-TIFF stack :type filepath: str :param imgarray: multi-dimensional image array :type imgarray: NumPy.Array :param metadata: metadata dictionary with the required information to create an correct OME-TIFF file :type metadata: dict :param reader: string (aicsimagio or czifile) specifying the used reader, defaults to aicsimageio :type metadata: str :param overwrite: option to overwrite an existing OME-TIFF, defaults to False :type overwrite: bool, optional """ # define scaling from metadata or use defualt scaling try: pixels_physical_size = [metadata['XScale'], metadata['YScale'], metadata['ZScale']] except KeyError as e: print('Key not found:', e) print('Use default scaling XYZ=1.0') pixels_physical_size = [1.0, 1.0, 1.0] # define channel names list from metadata try: channel_names = [] for ch in metadata['Channels']: channel_names.append(ch) except KeyError as e: print('Key not found:', e) channel_names = None # get the dimensions and their position inside the dimension string if reader == 'aicsimageio': dims_dict, dimindex_list, numvalid_dims = get_dimorder(metadata['Axes_aics']) # if the array has more than 5 dimensions then remove the S dimension # because it is not supported by OME-TIFF if len(imgarray.shape) > 5: try: imgarray = np.squeeze(imgarray, axis=dims_dict['S']) except Exception: print('Could not remover S Dimension from string.)') # remove the S character from the dimension string new_dimorder = metadata['Axes_aics'].replace('S', '') if reader == 'czifile': new_dimorder = metadata['Axes'] dims_dict, dimindex_list, numvalid_dims = get_dimorder(metadata['Axes']) """ '0': 'Sample', # e.g. RGBA 'X': 'Width', 'Y': 'Height', 'C': 'Channel', 'Z': 'Slice', # depth 'T': 'Time', 'R': 'Rotation', 'S': 'Scene', # contiguous regions of interest in a mosaic image 'I': 'Illumination', # direction 'B': 'Block', # acquisition 'M': 'Mosaic', # index of tile for compositing a scene 'H': 'Phase', # e.g. Airy detector fibers 'V': 'View', # e.g. for SPIM """ to_remove = [] # list of unspupported dims for writing an OME-TIFF dims = ['R', 'I', 'M', 'H', 'V', 'B', 'S', '0'] for dim in dims: if dims_dict[dim] >= 0: # remove the CZI DIMENSION character from the dimension string new_dimorder = new_dimorder.replace(dim, '') # add dimension index to the list of axis to be removed to_remove.append(dims_dict[dim]) print('Remove Dimension:', dim) # create tuple with dimensions to be removed dims2remove = tuple(to_remove) # remove dimensions from array imgarray = np.squeeze(imgarray, axis=dims2remove) # write the array as an OME-TIFF incl. 
the metadata try: with ome_tiff_writer.OmeTiffWriter(savepath, overwrite_file=overwrite) as writer: writer.save(imgarray, channel_names=channel_names, ome_xml=None, image_name=os.path.basename((savepath)), pixels_physical_size=pixels_physical_size, channel_colors=None, dimension_order=new_dimorder) writer.close() except Exception as error: print(error.__class__.__name__ + ": " + error.msg) print('Could not write OME-TIFF') savepath = None return savepath def correct_omeheader(omefile, old=("2012-03", "2013-06", r"ome/2016-06"), new=("2016-06", "2016-06", r"OME/2016-06") ): """This function is actually a workaround for AICSImageIO<=3.1.4 that correct some incorrect namespaces inside the OME-XML header :param omefile: OME-TIFF image file :type omefile: string :param old: strings that should be corrected, defaults to ("2012-03", "2013-06", r"ome/2016-06") :type old: tuple, optional :param new: replacement for the strings to be corrected, defaults to ("2016-06", "2016-06", r"OME/2016-06") :type new: tuple, optional """ # create the tif object from the filename tif = tifffile.TiffFile(omefile) # get the pixel array and the OME-XML string array = tif.asarray() omexml_string = tif.ome_metadata # search for the strings to be replaced and do it for ostr, nstr in zip(old, new): print('Replace: ', ostr, 'with', nstr) omexml_string = omexml_string.replace(ostr, nstr) # save the file with the new, correct strings tifffile.imsave(omefile, array, photometric='minisblack', description=omexml_string) # close tif object tif.close() print('Updated OME Header.') def get_fname_woext(filepath): """Get the complete path of a file without the extension It alos will works for extensions like c:\myfile.abc.xyz The output will be: c:\myfile :param filepath: complete fiepath :type filepath: str :return: complete filepath without extension :rtype: str """ # create empty string real_extension = '' # get all part of the file extension sufs = Path(filepath).suffixes for s in sufs: real_extension = real_extension + s # remover real extension from filepath filepath_woext = filepath.replace(real_extension, '') return filepath_woext def convert_to_ometiff(imagefilepath, bftoolsdir='/Users/bftools', czi_include_attachments=False, czi_autostitch=True, verbose=True): """Convert image file using bfconvert tool into a OME-TIFF from with a python script. :param imagefilepath: path to imagefile :type imagefilepath: str :param bftoolsdir: bftools directory containing the bfconvert, defaults to '/Users/bftools' :type bftoolsdir: str, optional :param czi_include_attachments: option convert a CZI attachment (if CZI), defaults to False :type czi_include_attachments: bool, optional :param czi_autostitch: option stich a CZI, defaults to True :type czi_autostitch: bool, optional :param verbose: show additional output, defaults to True :type verbose: bool, optional :return: fileparh of created OME-TIFF file :rtype: str """ # check if path exits if not os.path.exists(bftoolsdir): print('No bftools dirctory found. Nothing will be converted') file_ometiff = None if os.path.exists(bftoolsdir): # set working dir os.chdir(bftoolsdir) # get the imagefile path without extension imagefilepath_woext = get_fname_woext(imagefilepath) # create imagefile path for OME-TIFF file_ometiff = imagefilepath_woext + '.ome.tiff' # create cmdstring for CZI files- mind the spaces !!! 
if imagefilepath.lower().endswith('.czi'): # configure the CZI options if czi_include_attachments: czi_att = 'true' if not czi_include_attachments: czi_att = 'false' if czi_autostitch: czi_stitch = 'true' if not czi_autostitch: czi_stitch = 'false' # create cmdstring - mind the spaces !!! cmdstring = 'bfconvert -no-upgrade -option zeissczi.attachments ' + czi_att + ' -option zeissczi.autostitch ' + \ czi_stitch + ' "' + imagefilepath + '" "' + file_ometiff + '"' else: # create cmdstring for non-CZIs- mind the spaces !!! cmdstring = 'bfconvert -no-upgrade' + ' "' + imagefilepath + '" "' + file_ometiff + '"' if verbose: print('Original ImageFile : ', imagefilepath_woext) print('ImageFile OME.TIFF : ', file_ometiff) print('Use CMD : ', cmdstring) # run the bfconvert tool with the specified parameters os.system(cmdstring) print('Done.') return file_ometiff def get_dimpositions(dimstring, tocheck=['B', 'S', 'T', 'Z', 'C']): """Simple function to get the indices of the dimension identifiers in a string :param dimstring: dimension string :type dimstring: str :param tocheck: list of entries to check, defaults to ['B', 'S', 'T', 'Z', 'C'] :type tocheck: list, optional :return: dictionary with positions of dimensions inside string :rtype: dict """ dimpos = {} for p in tocheck: dimpos[p] = dimstring.find(p) return dimpos def norm_columns(df, colname='Time [s]', mode='min'): """Normalize a specif column inside a Pandas dataframe :param df: DataFrame :type df: pf.DataFrame :param colname: Name of the coumn to be normalized, defaults to 'Time [s]' :type colname: str, optional :param mode: Mode of Normalization, defaults to 'min' :type mode: str, optional :return: Dataframe with normalized column :rtype: pd.DataFrame """ # normalize columns according to min or max value if mode == 'min': min_value = df[colname].min() df[colname] = df[colname] - min_value if mode == 'max': max_value = df[colname].max() df[colname] = df[colname] - max_value return df def update5dstack(image5d, image2d, dimstring5d='TCZYX', t=0, z=0, c=0): # remove XY dimstring5d = dimstring5d.replace('X', '').replace('Y', '') if dimstring5d == 'TZC': image5d[t, z, c, :, :] = image2d if dimstring5d == 'TCZ': image5d[t, c, z, :, :] = image2d if dimstring5d == 'ZTC': image5d[z, t, c, :, :] = image2d if dimstring5d == 'ZCT': image5d[z, c, t, :, :] = image2d if dimstring5d == 'CTZ': image5d[c, t, z, :, :] = image2d if dimstring5d == 'CZT': image5d[c, z, t, :, :] = image2d return image5d def getdims_pylibczi(czi): # Get the shape of the data, the coordinate pairs are (start index, size) # [{'X': (0, 1900), 'Y': (0, 1300), 'Z': (0, 60), 'C': (0, 4), 'S': (0, 40), 'B': (0, 1)}] # dimensions = czi.dims_shape() dimsizes = {} for d in range(len(czi.dims)): # print(d) dimsizes['Size' + czi.dims[d]] = czi.size[d] return dimsizes def calc_normvar(img2d): """Determine normalized focus value for a 2D image - based on algorithm F - 11 "Normalized Variance" - Taken from: Sun et al., 2004. MICROSCOPY RESEARCH AND TECHNIQUE 65, 139–149. - Maximum value is best-focused, decreasing as defocus increases :param img2d: 2D image :type img2d: NumPy.Array :return: normalized focus value for the 2D image :rtype: float """ mean = np.mean(img2d) height = img2d.shape[0] width = img2d.shape[1] # subtract the mean and sum up the whole array fi = (img2d - mean)**2 b = np.sum(fi) # calculate the normalized variance value normvar = b / (height * width * mean)
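A quick sanity check of the normalized-variance focus measure defined in calc_normvar above: a blurred copy of an image should always score lower than the sharp original. The snippet below re-implements the same formula so it runs standalone, using a Gaussian blur as a stand-in for optical defocus.

import numpy as np
from scipy.ndimage import gaussian_filter

def normvar(img2d):
    # same formula as calc_normvar(): sum((I - mean)^2) / (h * w * mean)
    mean = img2d.mean()
    height, width = img2d.shape
    return ((img2d - mean) ** 2).sum() / (height * width * mean)

rng = np.random.default_rng(0)
sharp = rng.uniform(0.1, 1.0, size=(256, 256))   # sharp, noisy test image
blurred = gaussian_filter(sharp, sigma=5)        # simulated defocus
print(normvar(sharp), normvar(blurred))          # the sharp image scores higher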
"""Implementation of GraphTensor data type. """ import abc from typing import Any, Dict, Mapping, Optional, Union import tensorflow as tf from tensorflow_gnn.graph import graph_constants as const from tensorflow_gnn.graph import graph_piece as gp # pylint: disable=g-direct-tensorflow-import from tensorflow.python.framework import type_spec # pylint: enable=g-direct-tensorflow-import FieldName = const.FieldName NodeSetName = const.NodeSetName EdgeSetName = const.EdgeSetName ShapeLike = const.ShapeLike Field = const.Field Fields = const.Fields FieldSpec = const.FieldSpec FieldsSpec = const.FieldsSpec # TODO(b/189057503): use adjacency interface class instead. Adjacency = Any AdjacencySpec = Any class _GraphPieceWithFeatures(gp.GraphPieceBase, metaclass=abc.ABCMeta): """Base class for graph pieces that hold user-defined features.""" def __getitem__(self, feature_name: FieldName) -> Field: """Indexing operator `[]` to access feature values by their name.""" return self._get_features_ref[feature_name] @property def features(self) -> Mapping[FieldName, Field]: """Read-only view for features.""" return _as_immutable_mapping(self._get_features_ref) def get_features_dict(self) -> Dict[FieldName, Field]: """Returns features copy as a dictionary.""" return dict(self._get_features_ref) @abc.abstractproperty def _get_features_ref(self) -> Fields: """Returns the mutable features dict. Subclass controls location in data.""" raise NotImplementedError class _GraphPieceWithFeaturesSpec(gp.GraphPieceSpecBase): """TypeSpec for _GraphPieceWithFeatures.""" def __getitem__(self, feature_name: FieldName) -> FieldSpec: return self._get_features_spec_ref[feature_name] @property def features_spec(self) -> Mapping[FieldName, FieldSpec]: """A mapping of feature name to feature specs.""" return _as_immutable_mapping(self._get_features_spec_ref) @abc.abstractproperty def total_num_components(self) -> Optional[int]: """The total number of graph components across dimensions if known.""" raise NotImplementedError @abc.abstractproperty def _get_features_spec_ref(self) -> FieldsSpec: raise NotImplementedError class Context(_GraphPieceWithFeatures): """A container of features for a graph component. This class is a container for the shapes of the context features associated with each component of a graph in a `GraphTensor` instance. Note that the number of components of those features is always explicitly set to `1` (in lieu of the number of nodes, we've got one such feature per graph). (Note that this graph piece does not use any metadata fields.) """ @classmethod def from_fields( cls, *, features: Optional[Fields] = None, shape: ShapeLike = tf.TensorShape([]), indices_dtype: tf.dtypes.DType = const.default_indices_dtype ) -> 'Context': """Constructs a new instance from context fields. Args: features: mapping from feature names to feature Tensors or RaggedTensors. All feature tensors must have shape = graph_shape + [num_components] + feature_shape, where num_components is a number of graph components (could be ragged); feature_shape are field-specific inner dimensions. shape: the shape of this tensor and a GraphTensor containing it, also known as the graph_shape. indices_dtype: The `indices_dtype` of a GraphTensor containing this object, used as `row_splits_dtype` when batching potentially ragged fields. Returns: A `Context` tensor. 
""" if features is None: features = {} assert isinstance(features, Mapping) prepared_features = {key: gp.convert_to_tensor_or_ragged(value) for key, value in features.items()} return cls._from_data( prepared_features, shape=shape if isinstance(shape, tf.TensorShape) else tf.TensorShape(shape), indices_dtype=indices_dtype) def replace_features(self, features: Fields) -> 'Context': """Returns a new instance with a new set of features.""" assert isinstance(features, Mapping) return self.__class__.from_fields( features=features, shape=self.shape, indices_dtype=self.indices_dtype) @property def _get_features_ref(self) -> Fields: return self._data @staticmethod def _type_spec_cls(): return ContextSpec @type_spec.register('tensorflow_gnn.ContextSpec') class ContextSpec(_GraphPieceWithFeaturesSpec): """A type spec for global features for a graph component. This class is a type descriptor for the shapes of the context features associated with each component of a graph in a `GraphTensor` instance. Note that the prefix shape of those features is always explicitly set to either `1` for a single graph, or to the number of components for a batched graph. (Note that this graph piece does not use any metadata fields.) """ @classmethod def from_field_specs( cls, *, features_spec: Optional[FieldsSpec] = None, shape: ShapeLike = tf.TensorShape([]), indices_dtype: tf.dtypes.DType = const.default_indices_dtype ) -> 'ContextSpec': """Counterpart of `Context.from_fields()` for values type specs.""" if features_spec is None: features_spec = {} assert isinstance(features_spec, Mapping) return cls._from_data_spec( features_spec, shape=shape if isinstance(shape, tf.TensorShape) else tf.TensorShape(shape), indices_dtype=indices_dtype) @property def value_type(self): return Context @property def total_num_components(self) -> Optional[int]: """The total number of graph components across dimensions if known.""" indicative_feature_spec = _get_indicative_feature_spec(self._data_spec) if indicative_feature_spec is None: return None else: return indicative_feature_spec.shape[:(self.rank + 1)].num_elements() @property def _get_features_spec_ref(self) -> FieldsSpec: return self._data_spec class _NodeOrEdgeSet(_GraphPieceWithFeatures): """Base class for node set or edge set.""" _DATAKEY_FEATURES = 'features' # A Mapping[FieldName, Field]. _DATAKEY_SIZES = 'sizes' # A Field with `sizes`. @classmethod def _from_fields(cls, features: Fields, sizes: Field, **extra_data) -> '_NodeOrEdgeSet': assert isinstance(features, Mapping) sizes = gp.convert_to_tensor_or_ragged(sizes) prepared_features = {key: gp.convert_to_tensor_or_ragged(value) for key, value in features.items()} data = { _NodeOrEdgeSet._DATAKEY_FEATURES: prepared_features, _NodeOrEdgeSet._DATAKEY_SIZES: sizes } data.update({key: gp.convert_to_tensor_or_ragged(value) for key, value in extra_data.items()}) return cls._from_data( data=data, shape=sizes.shape[:-1], indices_dtype=sizes.dtype) def replace_features(self, features: Mapping[FieldName, Field]) -> '_NodeOrEdgeSet': """Returns a new instance with a new set of features.""" assert isinstance(features, Mapping) new_data = self._data.copy() new_data.update({_NodeOrEdgeSet._DATAKEY_FEATURES: features}) return self.__class__.from_fields(**new_data) @property def sizes(self) -> Field: """Tensor with a number of elements in each graph component.""" return self._data[_NodeOrEdgeSet._DATAKEY_SIZES] @property def total_size(self) -> tf.Tensor: """Returns the total number of elements across dimensions. 
Returns: Scalar integer tensor equal to `tf.math.reduce_sum(sizes)`. """ result = tf.math.reduce_sum(self.sizes) assert isinstance(result, tf.Tensor) and result.shape.rank == 0 return result @property def _get_features_ref(self) -> Fields: return self._data[_NodeOrEdgeSet._DATAKEY_FEATURES] class _NodeOrEdgeSetSpec(_GraphPieceWithFeaturesSpec): """TypeSpec for _NodeOrEdgeSet.""" @classmethod def _from_field_specs(cls, features_spec: FieldsSpec, sizes_spec: FieldSpec, **extra_data) -> '_NodeOrEdgeSetSpec': # pylint: disable=protected-access assert isinstance(features_spec, Mapping) data_spec = { _NodeOrEdgeSet._DATAKEY_FEATURES: features_spec, _NodeOrEdgeSet._DATAKEY_SIZES: sizes_spec } data_spec.update(extra_data) return cls._from_data_spec( data_spec, shape=sizes_spec.shape[:-1], indices_dtype=sizes_spec.dtype) @property def sizes_spec(self) -> FieldSpec: """A type spec for the sizes that provides num. elements per component.""" return self._data_spec[_NodeOrEdgeSet._DATAKEY_SIZES] # pylint: disable=protected-access @property def total_num_components(self) -> Optional[int]: """The total number of graph components across dimensions if known.""" return self.sizes_spec.shape.num_elements() @property def total_size(self) -> Optional[int]: """Returns the total number of graph entities across dimensions if known.""" indicative_feature_spec = _get_indicative_feature_spec( self._get_features_spec_ref) if indicative_feature_spec is None: return None else: return indicative_feature_spec.shape[:(self.rank + 1)].num_elements() @property def _get_features_spec_ref(self) -> FieldsSpec: return self._data_spec[_NodeOrEdgeSet._DATAKEY_FEATURES] # pylint: disable=protected-access class NodeSet(_NodeOrEdgeSet): """A container for the features of a single node set. This class is a container for the shapes of the features associated with a graph's node set from a `GraphTensor` instance. This graph piece stores features that belong to an edge set, and a `sizes` tensor with the number of edges in each graph component. (This graph piece does not use any metadata fields.) """ @classmethod def from_fields(cls, *, features: Optional[Fields] = None, sizes: Field) -> 'NodeSet': """Constructs a new instance from node set fields. Args: features: mapping from feature names to feature Tensors or RaggedTensors. All feature tensors must have shape = graph_shape + [num_nodes] + feature_shape, where num_nodes is the number of graph nodes in this set (could be ragged) and feature_shape are feature-specific inner dimensions. sizes: the number of nodes in each graph component. Has shape = graph_shape + [num_components], where num_components is the number of graph components (could be ragged). Returns: A `NodeSet` tensor. """ if features is None: features = {} return cls._from_fields(features=features, sizes=sizes) @staticmethod def _type_spec_cls(): return NodeSetSpec @type_spec.register('tensorflow_gnn.NodeSetSpec') class NodeSetSpec(_NodeOrEdgeSetSpec): """A type spec for the features of a single node set. This class is a type descriptor for the shapes of the features associated with a graph's node set from a `GraphTensor` instance. This graph piece stores features that belong to an edge set, and a `sizes` tensor with the number of edges in each graph component. (This graph piece does not use any metadata fields.) 
""" @classmethod def from_field_specs(cls, *, features_spec: Optional[FieldsSpec] = None, sizes_spec: FieldSpec) -> 'NodeSetSpec': """Counterpart of `NodeSet.from_fields()` for values type specs.""" if features_spec is None: features_spec = {} return cls._from_field_specs( features_spec=features_spec, sizes_spec=sizes_spec) @property def value_type(self): return NodeSet class EdgeSet(_NodeOrEdgeSet): """A container for the features of a single edge set. This class is a container for the shapes of the features associated with a graph's edge set from a `GraphTensor` instance. This graph piece stores features that belong to an edge set, a `sizes` tensor with the number of edges in each graph component and an `adjacency` `GraphPiece` tensor describing how this edge set connects node sets (see adjacency.py). (This graph piece does not use any metadata fields.) """ _DATAKEY_ADJACENCY = 'adjacency' # An Adjacency GraphPiece. @classmethod def from_fields(cls, *, features: Optional[Fields] = None, sizes: Field, adjacency: Adjacency) -> 'EdgeSet': """Constructs a new instance from edge set fields. Args: features: mapping from feature names to feature Tensors or RaggedTensors. All feature tensors must have shape = graph_shape + [num_edges] + feature_shape, where num_edges is the number of edges in the edge set (could be ragged) and
<reponame>CandyHuiZhang/deep-reinforcement-learning-pytorch<gh_stars>1-10 from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import random import time import math import torch from torch.autograd import Variable, grad, backward import torch.nn.functional as F from utils.helpers import ACER_On_Policy_Experience from utils.distributions import sample_poisson, categorical_kl_div from optims.helpers import adjust_learning_rate from core.agent_single_process import AgentSingleProcess class ACERSingleProcess(AgentSingleProcess): def __init__(self, master, process_id=0): super(ACERSingleProcess, self).__init__(master, process_id) # lstm hidden states if self.master.enable_lstm: self._reset_on_policy_lstm_hidden_vb_episode() # clear up hidden state self._reset_on_policy_lstm_hidden_vb_rollout() # detach the previous variable from the computation graph self._reset_off_policy_lstm_hidden_vb() # clear up hidden state, since sampled batches won't be connected from previous batches # # NOTE global variable pi # if self.master.enable_continuous: # self.pi_vb = Variable(torch.Tensor([math.pi]).type(self.master.dtype)) self.master.logger.warning("Registered ACER-SingleProcess-Agent #" + str(self.process_id) + " w/ Env (seed:" + str(self.env.seed) + ").") # NOTE: to be called at the beginning of each new episode, clear up the hidden state def _reset_on_policy_lstm_hidden_vb_episode(self, training=True): # seq_len, batch_size, hidden_dim not_training = not training if self.master.enable_continuous: # self.on_policy_lstm_hidden_vb = (Variable(torch.zeros(2, self.master.hidden_dim).type(self.master.dtype), volatile=not_training), # Variable(torch.zeros(2, self.master.hidden_dim).type(self.master.dtype), volatile=not_training)) pass else: # for self.model self.on_policy_lstm_hidden_vb = (Variable(torch.zeros(1, self.master.hidden_dim).type(self.master.dtype), volatile=not_training), Variable(torch.zeros(1, self.master.hidden_dim).type(self.master.dtype), volatile=not_training)) # for self.master.avg_model # NOTE: no grads are needed to compute on this model, so always volatile self.on_policy_avg_lstm_hidden_vb = (Variable(torch.zeros(1, self.master.hidden_dim).type(self.master.dtype), volatile=True), Variable(torch.zeros(1, self.master.hidden_dim).type(self.master.dtype), volatile=True)) # NOTE: to be called at the beginning of each rollout, detach the previous variable from the graph def _reset_on_policy_lstm_hidden_vb_rollout(self): # for self.model self.on_policy_lstm_hidden_vb = (Variable(self.on_policy_lstm_hidden_vb[0].data), Variable(self.on_policy_lstm_hidden_vb[1].data)) # for self.master.avg_model self.on_policy_avg_lstm_hidden_vb = (Variable(self.on_policy_avg_lstm_hidden_vb[0].data), Variable(self.on_policy_avg_lstm_hidden_vb[1].data)) # NOTE: to be called before each off-policy learning phase # NOTE: keeping it separate so as not to mess up the on_policy_lstm_hidden_vb if the current on-policy episode has not finished after the last rollout def _reset_off_policy_lstm_hidden_vb(self, training=True): not_training = not training if self.master.enable_continuous: pass else: # for self.model self.off_policy_lstm_hidden_vb = (Variable(torch.zeros(self.master.batch_size, self.master.hidden_dim).type(self.master.dtype), volatile=not_training), Variable(torch.zeros(self.master.batch_size, self.master.hidden_dim).type(self.master.dtype), volatile=not_training)) # for self.master.avg_model # NOTE: no grads are needed to be computed 
on this model self.off_policy_avg_lstm_hidden_vb = (Variable(torch.zeros(self.master.batch_size, self.master.hidden_dim).type(self.master.dtype)), Variable(torch.zeros(self.master.batch_size, self.master.hidden_dim).type(self.master.dtype))) def _preprocessState(self, state, on_policy, is_valotile=False): if isinstance(state, list): state_vb = [] for i in range(len(state)): if on_policy: state_vb.append(Variable(torch.from_numpy(state[i]).unsqueeze(0).type(self.master.dtype), volatile=is_valotile)) else: state_vb.append(Variable(torch.from_numpy(state[i]).view(-1, self.master.state_shape).type(self.master.dtype), volatile=is_valotile)) else: if on_policy: state_vb = Variable(torch.from_numpy(state).unsqueeze(0).type(self.master.dtype), volatile=is_valotile) else: state_vb = Variable(torch.from_numpy(state).view(-1, self.master.state_shape).type(self.master.dtype), volatile=is_valotile) return state_vb def _forward(self, state_vb, on_policy=True): if self.master.enable_continuous: pass else: if self.master.enable_lstm: if on_policy: # learn from the current experience p_vb, q_vb, v_vb, self.on_policy_lstm_hidden_vb = self.model(state_vb, self.on_policy_lstm_hidden_vb) avg_p_vb, _, _, self.on_policy_avg_lstm_hidden_vb = self.master.avg_model(state_vb, self.on_policy_avg_lstm_hidden_vb) # then we also need to get an action for the next time step if self.training: action = p_vb.multinomial().data[0][0] else: action = p_vb.max(1)[1].data.squeeze().numpy()[0] return action, p_vb, q_vb, v_vb, avg_p_vb else: # learn from the sampled replays p_vb, q_vb, v_vb, self.off_policy_lstm_hidden_vb = self.model(state_vb, self.off_policy_lstm_hidden_vb) avg_p_vb, _, _, self.off_policy_avg_lstm_hidden_vb = self.master.avg_model(state_vb, self.off_policy_avg_lstm_hidden_vb) return _, p_vb, q_vb, v_vb, avg_p_vb else: pass class ACERLearner(ACERSingleProcess): def __init__(self, master, process_id=0): master.logger.warning("<===================================> ACER-Learner #" + str(process_id) + " {Env & Model & Memory}") super(ACERLearner, self).__init__(master, process_id) # NOTE: diff from pure on-policy methods like a3c, acer is capable of # NOTE: off-policy learning and can make use of replay buffer self.memory = self.master.memory_prototype(capacity = self.master.memory_params.memory_size // self.master.num_processes, max_episode_length = self.master.early_stop) self._reset_rollout() self.training = True # choose actions by polinomial self.model.train(self.training) # local counters self.frame_step = 0 # local frame step counter self.train_step = 0 # local train step counter self.on_policy_train_step = 0 # local on-policy train step counter self.off_policy_train_step = 0 # local off-policy train step counter # local training stats self.p_loss_avg = 0. # global policy loss self.v_loss_avg = 0. # global value loss self.entropy_loss_avg = 0. # global entropy loss self.loss_counter = 0 # storing this many losses self._reset_training_loggings() # copy local training stats to global every prog_freq self.last_prog = time.time() def _reset_training_loggings(self): self.p_loss_avg = 0. self.v_loss_avg = 0. self.entropy_loss_avg = 0. 
self.loss_counter = 0 def _reset_rollout(self): # for storing the experiences collected through one rollout self.rollout = ACER_On_Policy_Experience(state0 = [], action = [], reward = [], state1 = [], terminal1 = [], policy_vb = [], q0_vb = [], value0_vb = [], detached_avg_policy_vb = [], detached_old_policy_vb = []) def _get_QretT_vb(self, on_policy=True): if on_policy: if self.rollout.terminal1[-1]: # for terminal sT: Q_ret = 0 QretT_vb = Variable(torch.zeros(1, 1)) else: # for non-terminal sT: Qret = V(s_i; /theta) sT_vb = self._preprocessState(self.rollout.state1[-1], on_policy, True) # bootstrap from last state if self.master.enable_lstm: _, _, QretT_vb, _ = self.model(sT_vb, self.on_policy_lstm_hidden_vb)# NOTE: only doing inference here else: _, _, QretT_vb = self.model(sT_vb) # NOTE: only doing inference here # # NOTE: here QretT_vb.volatile=True since sT_vb.volatile=True # # NOTE: if we use detach() here, it would remain volatile # # NOTE: then all the follow-up computations would only give volatile loss variables # QretT_vb = Variable(QretT_vb.data) else: sT_vb = self._preprocessState(self.rollout.state1[-1], on_policy, True) # bootstrap from last state if self.master.enable_lstm: _, _, QretT_vb, _ = self.model(sT_vb, self.off_policy_lstm_hidden_vb) # NOTE: only doing inference here else: _, _, QretT_vb = self.model(sT_vb) # NOTE: only doing inference here # now we have to also set QretT_vb to 0 for terminal sT's QretT_vb = ((1 - Variable(torch.from_numpy(np.array(self.rollout.terminal1[-1])).float())) * QretT_vb) # NOTE: here QretT_vb.volatile=True since sT_vb.volatile=True # NOTE: if we use detach() here, it would remain volatile # NOTE: then all the follow-up computations would only give volatile loss variables return Variable(QretT_vb.data) def _1st_order_trpo(self, detached_policy_loss_vb, detached_policy_vb, detached_avg_policy_vb, detached_splitted_policy_vb=None): on_policy = detached_splitted_policy_vb is None # KL divergence k = \delta_{\phi_{\theta}} DKL[ \pi(|\phi_{\theta_a}) || \pi{|\phi_{\theta}}] # kl_div_vb = F.kl_div(detached_policy_vb.log(), detached_avg_policy_vb, size_average=False) # NOTE: the built-in one does not work on batch kl_div_vb = categorical_kl_div(detached_policy_vb, detached_avg_policy_vb) # NOTE: k & g are wll w.r.t. the network output, which is detached_policy_vb # NOTE: gradient from this part will not flow back into the model # NOTE: that's why we are only using detached policy variables here if on_policy: k_vb = grad(outputs=kl_div_vb, inputs=detached_policy_vb, retain_graph=False, only_inputs=True)[0] g_vb = grad(outputs=detached_policy_loss_vb, inputs=detached_policy_vb, retain_graph=False, only_inputs=True)[0] else: # NOTE NOTE NOTE !!! 
# NOTE: here is why we cannot simply detach then split the policy_vb, but must split before detach # NOTE: cos if we do that then the split cannot backtrace the grads computed in this later part of the graph # NOTE: it would have no way to connect to the graphs in the model k_vb = grad(outputs=(kl_div_vb.split(1, 0)), inputs=(detached_splitted_policy_vb), retain_graph=False, only_inputs=True) g_vb = grad(outputs=(detached_policy_loss_vb.split(1, 0)), inputs=(detached_splitted_policy_vb), retain_graph=False, only_inputs=True) k_vb = torch.cat(k_vb, 0) g_vb = torch.cat(g_vb, 0) kg_dot_vb = (k_vb * g_vb).sum(1, keepdim=True) kk_dot_vb = (k_vb * k_vb).sum(1, keepdim=True) z_star_vb = g_vb - ((kg_dot_vb - self.master.clip_1st_order_trpo) / kk_dot_vb).clamp(min=0) * k_vb return z_star_vb def _update_global_avg_model(self): for global_param, global_avg_param in zip(self.master.model.parameters(), self.master.avg_model.parameters()): global_avg_param = self.master.avg_model_decay * global_avg_param + \ (1 - self.master.avg_model_decay) * global_param def _backward(self, unsplitted_policy_vb=None): on_policy = unsplitted_policy_vb is None # preparation rollout_steps = len(self.rollout.reward) if self.master.enable_continuous: pass else: action_batch_vb = Variable(torch.from_numpy(np.array(self.rollout.action)).view(rollout_steps, -1, 1).long()) # [rollout_steps x batch_size x 1] if self.master.use_cuda: action_batch_vb = action_batch_vb.cuda() if not on_policy: # we save this transformation for on-policy reward_batch_vb = Variable(torch.from_numpy(np.array(self.rollout.reward)).view(rollout_steps, -1, 1).float()) # [rollout_steps x batch_size x 1] # NOTE: here we use the detached policies, cos when using 1st order trpo, # NOTE: the policy losses are not directly backproped into the model # NOTE: but only backproped up to the output of the network # NOTE: and to make the code consistent, we also decouple the backprop # NOTE: into two parts when not using trpo policy update # NOTE: requires_grad of detached_policy_vb must be True, otherwise grad will not be able to # NOTE: flow between the two stagets of backprop if on_policy: policy_vb = self.rollout.policy_vb detached_splitted_policy_vb = None detached_policy_vb = [Variable(self.rollout.policy_vb[i].data, requires_grad=True) for i in range(rollout_steps)] # [rollout_steps x batch_size x action_dim] else: # NOTE: here rollout.policy_vb is already split by trajectories, we can safely detach and not causing trouble for feed in tuples into grad later # NOTE: rollout.policy_vb: undetached, splitted -> what we stored during the fake _off_policy_rollout # NOTE: policy_vb: undetached, batch -> 1. entropy, cos grad from entropy need to flow back through the whole graph 2. the backward of 2nd stage should be computed on this # NOTE: detached_splitted_policy_vb: detached, splitted
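# The closed-form update in _1st_order_trpo() above can be read as a per-row
# projection of the policy-loss gradient g against the KL gradient k:
#     z* = g - max(0, (k.g - delta) / ||k||^2) * k
# A standalone sketch of just that arithmetic (trpo_first_order_projection is
# an illustrative name; in the agent, g and k are gradients taken w.r.t. the
# detached policy output and delta is master.clip_1st_order_trpo):
import torch

def trpo_first_order_projection(g, k, delta):
    kg = (k * g).sum(1, keepdim=True)   # batchwise dot products
    kk = (k * k).sum(1, keepdim=True)
    return g - ((kg - delta) / kk).clamp(min=0) * k

# with a loose constraint the gradient passes through unchanged,
# with a tight one its component along k is trimmed back to the boundary
g = torch.tensor([[2.0, 0.0]])
k = torch.tensor([[1.0, 0.0]])
print(trpo_first_order_projection(g, k, delta=10.0))  # [[2.0, 0.0]]
print(trpo_first_order_projection(g, k, delta=0.5))   # [[0.5, 0.0]]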