filename (stringlengths 13–19) | text (stringlengths 134–1.04M)
---|---|
the-stack_0_8304 | #!C:/Program Files/Python38/python.exe
import binascii
import mysql.connector
from datetime import datetime
# open save file
filename = r'C:\Users\Aaron\AppData\Roaming\RetroArch\states\nhl94_updated.state'
with open(filename,'rb') as inputfile:
content = inputfile.read()
hexFile = binascii.hexlify(content).decode('utf-8')
n = 2
hexes = [(hexFile[i:i+n]) for i in range(0, len(hexFile), n)]
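# The save state stores its 16-bit counters little-endian, which is why the script
# below repeatedly concatenates hexes[n + 1] + hexes[n] before converting to int.
# A small helper (our own addition for illustration, not part of the original script)
# that performs the same decode:
def read_u16le(hex_bytes, offset):
    """Return the 16-bit little-endian value stored at hex_bytes[offset:offset + 2]."""
    return int(hex_bytes[offset + 1] + hex_bytes[offset], 16)
# e.g. read_u16le(['34', '12'], 0) == 0x1234 == 4660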
# connect to MySQL
mydb = mysql.connector.connect(
host='localhost',
user='nhl94',
password='HpMZ6o6UMi',
database='nhl94seasonreplay'
)
mycursor = mydb.cursor()
# get team ids
teams = {}
mycursor.execute("SELECT * FROM teams WHERE hexvalue = '{}'".format(hexes[49984]))
teams['home'] = mycursor.fetchone()
mycursor.execute("SELECT * FROM teams WHERE hexvalue = '{}'".format(hexes[49986]))
teams['away'] = mycursor.fetchone()
# get the scheduleid
with open("www\currentgame.txt",'r') as inputfile:
scheduleid = inputfile.read()
# insert into games table (game stats)
sql = "INSERT INTO games (schedule_id, team_id, goals, shots, ppgoals, ppattempts, ppseconds, ppshots, shgoals, breakawayattempts, breakawaygoals, onetimerattempts, onetimergoals, penaltyshotattempts, penaltyshotgoals, faceoffswon, bodychecks, penalties, pim, attackzoneseconds, passattempts, passsuccess, insert_datetime) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
now = datetime.now()
insert_datetime = now.strftime("%Y-%m-%d %H:%M:%S")
val = [
(scheduleid,teams['home'][0],int(str(hexes[50923]) + str(hexes[50922]),16),int(str(hexes[50911]) + str(hexes[50910]),16),int(str(hexes[50913]) + str(hexes[50912]),16),int(str(hexes[50915]) + str(hexes[50914]),16),int(str(hexes[51761]) + str(hexes[51760]),16),int(str(hexes[51763]) + str(hexes[51762]),16),int(str(hexes[51765]) + str(hexes[51764]),16),int(str(hexes[51767]) + str(hexes[51766]),16),int(str(hexes[51769]) + str(hexes[51768]),16),int(str(hexes[51771]) + str(hexes[51770]),16),int(str(hexes[51773]) + str(hexes[51772]),16),int(str(hexes[51775]) + str(hexes[51774]),16),int(str(hexes[51777]) + str(hexes[51776]),16),int(str(hexes[50925]) + str(hexes[50924]),16),int(str(hexes[50927]) + str(hexes[50926]),16),int(str(hexes[50917]) + str(hexes[50916]),16),int(str(hexes[50919]) + str(hexes[50918]),16),int(str(hexes[50921]) + str(hexes[50920]),16),int(str(hexes[50929]) + str(hexes[50928]),16),int(str(hexes[50931]) + str(hexes[50930]),16),insert_datetime),
(scheduleid,teams['away'][0],int(str(hexes[51791]) + str(hexes[51790]),16),int(str(hexes[51779]) + str(hexes[51778]),16),int(str(hexes[51781]) + str(hexes[51780]),16),int(str(hexes[51783]) + str(hexes[51782]),16),int(str(hexes[52629]) + str(hexes[52628]),16),int(str(hexes[52631]) + str(hexes[52630]),16),int(str(hexes[52633]) + str(hexes[52632]),16),int(str(hexes[52635]) + str(hexes[52634]),16),int(str(hexes[52637]) + str(hexes[52636]),16),int(str(hexes[52639]) + str(hexes[52638]),16),int(str(hexes[52641]) + str(hexes[52640]),16),int(str(hexes[52643]) + str(hexes[52642]),16),int(str(hexes[52645]) + str(hexes[52644]),16),int(str(hexes[51793]) + str(hexes[51792]),16),int(str(hexes[51795]) + str(hexes[51794]),16),int(str(hexes[51785]) + str(hexes[51784]),16),int(str(hexes[51787]) + str(hexes[51786]),16),int(str(hexes[51789]) + str(hexes[51788]),16),int(str(hexes[51797]) + str(hexes[51796]),16),int(str(hexes[51799]) + str(hexes[51798]),16),insert_datetime)
]
mycursor.executemany(sql, val)
mydb.commit()
games = {}
games['home'] = mycursor.lastrowid
games['away'] = mycursor.lastrowid + 1
# print('Game Stats successfully uploaded')
# insert into periodstats table
sql = "INSERT INTO periodstats (game_id, period, goals, shots) VALUES (%s, %s, %s, %s)"
periodstats = []
# periodrange = int(hexes[50294],16) + 1
periodrange = 4
for p in range(0,periodrange):
periodstats.append([games['home'],p + 1,int(str(hexes[51745 + p * 2]) + str(hexes[51744 + p * 2]),16),int(str(hexes[51753 + p * 2]) + str(hexes[51752 + p * 2]),16)])
periodstats.append([games['away'],p + 1,int(str(hexes[52613 + p * 2]) + str(hexes[52612 + p * 2]),16),int(str(hexes[52621 + p * 2]) + str(hexes[52620 + p * 2]),16)])
mycursor.executemany(sql, periodstats)
mydb.commit()
# print('Period Stats successfully uploaded')
# get rosters from database
rosters = {}
for key, value in teams.items():
mycursor.execute("SELECT * FROM players WHERE team_id = {} ORDER BY id ASC".format(teams[key][0]))
rosters[key] = mycursor.fetchall()
# insert into scoringsummary table
scoringsummary = []
scorsummsize = int(hexes[50306],16)
for i in range(50308, 50308 + scorsummsize, 6):
if hexes[i + 1][:1] == '0':
period = 1
elif hexes[i + 1][:1] == '4':
period = 2
elif hexes[i + 1][:1] == '8':
period = 3
else:
period = 4
timeelapsed = int(str(hexes[i + 1][1:]) + str(hexes[i]),16)
goaltype = hexes[i + 3]
if hexes[i + 3][:1] == '0':
scoresummary_team = 'home'
else:
scoresummary_team = 'away'
goalscorer = rosters[scoresummary_team][int(hexes[i + 2],16)][0]
if hexes[i + 5] != 'ff':
assist1 = rosters[scoresummary_team][int(hexes[i + 5],16)][0]
else:
assist1 = None
if hexes[i + 4] != 'ff':
assist2 = rosters[scoresummary_team][int(hexes[i + 4],16)][0]
else:
assist2 = None
scoringsummary.append([scheduleid,period,timeelapsed,goaltype,goalscorer,assist1,assist2])
# run SQL
sql = "INSERT INTO scoringsummary (schedule_id,period,timeelapsed,goaltype,goal_player_id,assist1_player_id,assist2_player_id) VALUES (%s, %s, %s, %s, %s, %s, %s)"
mycursor.executemany(sql, scoringsummary)
mydb.commit()
# print('Scoring Summary successfully uploaded')
# insert into penaltysummary table
penaltysummary = []
penasummsize = int(hexes[50668],16)
for i in range(50670, 50670 + penasummsize, 4):
if hexes[i + 1][:1] == '0':
period = 1
elif hexes[i + 1][:1] == '4':
period = 2
elif hexes[i + 1][:1] == '8':
period = 3
else:
period = 4
timeelapsed = int(str(hexes[i + 1][1:]) + str(hexes[i]),16)
penaltytype = int(hexes[i + 3],16)
if penaltytype < 100:
penaltysummary_team = 'home'
penaltyid = penaltytype - 16
else:
penaltysummary_team = 'away'
penaltyid = penaltytype - 144
penaltyon = rosters[penaltysummary_team][int(hexes[i + 2],16)][0]
penaltysummary.append([scheduleid,period,timeelapsed,penaltyid,penaltyon])
# run SQL
sql = "INSERT INTO penaltysummary (schedule_id,period,timeelapsed,penalty_id,player_id) VALUES (%s, %s, %s, %s, %s)"
mycursor.executemany(sql, penaltysummary)
mydb.commit()
# print('Penalty Summary successfully uploaded')
# insert into playerstats table
playerstats = {}
# home
players = []
for player_index, v in enumerate(rosters['home']):
players.append([games['home'],rosters['home'][player_index][0],
int(hexes[51089 + player_index + (player_index + 1) % 2 * 2],16),
int(hexes[51115 + player_index + (player_index + 1) % 2 * 2],16),
int(hexes[51141 + player_index + (player_index + 1) % 2 * 2],16),
int(hexes[51167 + player_index + (player_index + 1) % 2 * 2],16),
int(hexes[51193 + player_index + (player_index + 1) % 2 * 2],16),
int(str(hexes[51219 + (player_index + 1) * 2]) + (hexes[51218 + (player_index + 1) * 2]),16)
])
playerstats['home'] = players
# away
players = []
for player_index, v in enumerate(rosters['away']):
players.append([games['away'],rosters['away'][player_index][0],
int(hexes[51957 + player_index + (player_index + 1) % 2 * 2],16),
int(hexes[51983 + player_index + (player_index + 1) % 2 * 2],16),
int(hexes[52009 + player_index + (player_index + 1) % 2 * 2],16),
int(hexes[52035 + player_index + (player_index + 1) % 2 * 2],16),
int(hexes[52061 + player_index + (player_index + 1) % 2 * 2],16),
int(str(hexes[52087 + (player_index + 1) * 2]) + (hexes[52086 + (player_index + 1) * 2]),16)
])
playerstats['away'] = players
# add +/-
plusminus = {}
# initialize all player containers
temph = []
tempa = []
for i in range(0, 25):
temph.append(0)
tempa.append(0)
plusminus['home'] = temph
plusminus['away'] = tempa
# parse data
pmsize = int(str(hexes[57093]) + str(hexes[57092]),16)
for i in range(57094, 57094 + pmsize, 14):
if (hexes[i + 1][:1] == '0' or hexes[i + 1][:1] == '8') and int(hexes[i + 1][1:],16) <= 2:
if hexes[i + 1][:1] == '0':
scoringteam = 'home'
elif hexes[i + 1][:1] == '8':
scoringteam = 'away'
for h in range(2,8):
if int(hexes[i + h],16) != 255:
plusminus['home'][int(hexes[i + h],16)] = plusminus['home'][int(hexes[i + h],16)] + (1 if scoringteam == 'home' else -1)
for a in range(8,14):
if int(hexes[i + a],16) != 255:
plusminus['away'][int(hexes[i + a],16)] = plusminus['away'][int(hexes[i + a],16)] + (1 if scoringteam == 'away' else -1)
# append +/- to playerstats
for key, val in playerstats.items():
for player_index, val in enumerate(playerstats[key]):
playerstats[key][player_index].append(plusminus[key][player_index])
# run SQL
sql = "INSERT INTO playerstats (game_id, player_id, goals, assists, sog, checksfor, checksagainst, toi, plusminus) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
mycursor.executemany(sql, playerstats['home'])
mycursor.executemany(sql, playerstats['away'])
mydb.commit()
# print('Player Stats successfully uploaded')
|
the-stack_0_8305 | import unittest
from transform.transformers.common_software import PCKTransformer
class TestPckTransformer(unittest.TestCase):
@staticmethod
def test_round_to_nearest_whole_number():
"""Tests the round_to_nearest_whole_number function in PCKTransformer on a variety of numbers"""
scenarios = [
('200', '200'),
('1234.1', '1234'),
('1004.5', '1005'),
('34955.8', '34956')
]
for input_value, output_value in scenarios:
assert str(PCKTransformer.round_to_nearest_whole_number(input_value)) == output_value
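# The 1004.5 -> 1005 case above implies half-up rounding rather than Python's default
# banker's rounding. A minimal sketch that reproduces the table (our own, not the
# library's actual implementation):
#     from decimal import Decimal, ROUND_HALF_UP
#     def round_to_nearest_whole_number(value):
#         return Decimal(value).quantize(Decimal('1'), rounding=ROUND_HALF_UP)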
def test_get_cs_form_id_passes(self):
scenarios = [
('023', '0102', 'RSI5B'),
('139', '0001', 'Q01B'),
('019', '0018', '0018'),
('019', '0019', '0019'),
('019', '0020', '0020')
]
for survey_id, instrument_id, expected_form_id in scenarios:
survey = {'survey_id': survey_id}
response = {'collection': {'instrument_id': instrument_id}}
pck_transformer = PCKTransformer(survey, response)
form_id = pck_transformer.get_cs_form_id()
assert form_id == expected_form_id
def test_get_cs_form_id_invalid_survey(self):
survey = {'survey_id': 23}
response = {'collection': {'instrument_id': '0102'}}
pck_transformer = PCKTransformer(survey, response)
with self.assertLogs(level='ERROR') as cm:
form_id = pck_transformer.get_cs_form_id()
self.assertEqual(form_id, None)
self.assertIn('Invalid survey id', cm.output[0])
def test_get_cs_form_id_invalid_instrument(self):
survey = {'survey_id': '023'}
response = {'collection': {'instrument_id': '000'}}
pck_transformer = PCKTransformer(survey, response)
with self.assertLogs(level='ERROR') as cm:
form_id = pck_transformer.get_cs_form_id()
self.assertEqual(form_id, None)
self.assertIn('Invalid instrument id', cm.output[0])
# QCAS
survey = {'survey_id': '019'}
response = {'collection': {'instrument_id': '0021'}}
pck_transformer = PCKTransformer(survey, response)
with self.assertLogs(level='ERROR') as cm:
form_id = pck_transformer.get_cs_form_id()
self.assertEqual(form_id, None)
self.assertIn("Invalid instrument id", cm.output[0])
def test_pck_transformer_cannot_change_the_data_it_is_passed(self):
"""Tests that pck does not modify the data it is passed.
Without the deep copy pck integer rounding will apply to the passed in data
and hence get displayed in images"""
survey = {'survey_id': '023'}
response = {'collection': {'instrument_id': '000'}, 'data': {'item1': 'value1'}}
pck_transformer = PCKTransformer(survey, response)
pck_transformer.data['item1'] = 'new value'
assert response['data']['item1'] == 'value1'
def test_pck_transformer_discards_qcas_confirmation_question(self):
"""
For QCAS, the questions 'd681' and 'd12' do not need to be transformed,
so they can be deleted.
"""
survey = {'survey_id': '019'}
response = {'collection': {'instrument_id': '000'}, 'data': {'681': '100', 'd681': 'Yes', 'd12': 'Yes'}}
pck_transformer = PCKTransformer(survey, response)
assert pck_transformer.data == {'681': '100', 'd681': 'Yes', 'd12': 'Yes'}
pck_transformer.evaluate_confirmation_questions()
assert pck_transformer.data == {'681': '100'}
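# A minimal sketch (ours, inferred from the assertions above, not the library's code)
# of what evaluate_confirmation_questions is expected to do for QCAS:
#     def evaluate_confirmation_questions(data):
#         for qcode in ('d681', 'd12'):
#             data.pop(qcode, None)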
@staticmethod
def test_pck_transformer_parse_yes_no_questions():
"""
For QSS (Stocks), qcode 15 needs to be converted from Yes/No to 1/0 for the PCK.
"""
survey = {'survey_id': '017'}
# qcode 15 = Yes case
response = {'collection': {'instrument_id': '0001'}, 'data': {'15': 'Yes', '146': 'Comment question', '139': '13900'}}
pck_transformer = PCKTransformer(survey, response)
assert pck_transformer.data == {'15': 'Yes', '146': 'Comment question', '139': '13900'}
pck_transformer.parse_yes_no_questions()
assert pck_transformer.data == {'15': '1', '146': 'Comment question', '139': '13900'}
# qcode 15 = No case
response = {'collection': {'instrument_id': '0001'}, 'data': {'15': 'No', '146': 'Comment question', '139': '13900'}}
pck_transformer = PCKTransformer(survey, response)
assert pck_transformer.data == {'15': 'No', '146': 'Comment question', '139': '13900'}
pck_transformer.parse_yes_no_questions()
assert pck_transformer.data == {'15': '0', '146': 'Comment question', '139': '13900'}
@staticmethod
def test_pck_transformer_parse_yes_no_construction_questions():
survey = {'survey_id': '228'}
# q code 902, 903, 904 yes
response = {'collection': {'instrument_id': '0001'}, 'data': {'901': 'Yes, I can report for this period',
'902': 'Yes, we carried out work on housing',
'903': 'Yes, we carried out work on infrastructure',
'904': 'Yes, we carried out other construction work'}}
pck_transformer = PCKTransformer(survey, response)
assert pck_transformer.data == {'901': 'Yes, I can report for this period',
'902': 'Yes, we carried out work on housing',
'903': 'Yes, we carried out work on infrastructure',
'904': 'Yes, we carried out other construction work'}
pck_transformer.parse_yes_no_questions()
assert pck_transformer.data == {'901': '1',
'902': '1',
'903': '1',
'904': '1'}
# q code 902, 903, 904 no
response = {'collection': {'instrument_id': '0001'}, 'data': {'901': 'Yes, I can report for this period',
'902': 'No, we did not carry out work on housing',
'903': 'No, we did not carry out work on infrastructure',
'904': 'No, we did not carry out other construction work'}}
pck_transformer = PCKTransformer(survey, response)
assert pck_transformer.data == {'901': 'Yes, I can report for this period',
'902': 'No, we did not carry out work on housing',
'903': 'No, we did not carry out work on infrastructure',
'904': 'No, we did not carry out other construction work'}
pck_transformer.parse_yes_no_questions()
assert pck_transformer.data == {'901': '1',
'902': '2',
'903': '2',
'904': '2'}
# q code 902, 903, 904 missing
response = {'collection': {'instrument_id': '0001'}, 'data': {'901': 'Yes, I can report for this period'}}
pck_transformer = PCKTransformer(survey, response)
assert pck_transformer.data == {'901': 'Yes, I can report for this period'}
pck_transformer.parse_yes_no_questions()
assert pck_transformer.data == {'901': '1',
'902': '2',
'903': '2',
'904': '2'}
# q code 902, 903 no 904 yes
response = {'collection': {'instrument_id': '0001'}, 'data': {'901': 'Yes, I can report for this period',
'902': 'No, we did not carry out work on housing',
'903': 'No, we did not carry out work on infrastructure',
'904': 'Yes, we carried out other construction work'}}
pck_transformer = PCKTransformer(survey, response)
assert pck_transformer.data == {'901': 'Yes, I can report for this period',
'902': 'No, we did not carry out work on housing',
'903': 'No, we did not carry out work on infrastructure',
'904': 'Yes, we carried out other construction work'}
pck_transformer.parse_yes_no_questions()
assert pck_transformer.data == {'901': '1',
'902': '2',
'903': '2',
'904': '1'}
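# A minimal sketch (ours, inferred from the two tests above, not the library's code)
# of the Yes/No parsing they exercise: survey 017 maps qcode 15 to '1'/'0', while
# survey 228 maps qcodes 901-904 to '1' when the answer starts with 'Yes' and '2'
# otherwise (including when the qcode is missing):
#     def parse_yes_no_questions(data, survey_id):
#         if survey_id == '017' and '15' in data:
#             data['15'] = '1' if data['15'] == 'Yes' else '0'
#         if survey_id == '228':
#             for qcode in ('901', '902', '903', '904'):
#                 data[qcode] = '1' if data.get(qcode, '').startswith('Yes') else '2'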
def test_pck_transformer_parse_negative_values(self):
"""If any values in the survey are negative, they should be replaced with an all 9's string that is 11 characters long
"""
survey = {'survey_id': '019'}
response = {'collection': {'instrument_id': '000'},
'data': {'681': '-100', '703': '-1234', '704': '-12345', '707': '-123456', '708': '-0', '709': '1234', '710': '-123word'}}
pck_transformer = PCKTransformer(survey, response)
self.assertEqual(pck_transformer.data, {
'681': '-100',
'703': '-1234',
'704': '-12345',
'707': '-123456',
'708': '-0',
'709': '1234',
'710': '-123word'})
pck_transformer.parse_negative_values()
self.assertEqual(pck_transformer.data, {
'681': '99999999999',
'703': '99999999999',
'704': '99999999999',
'707': '99999999999',
'708': '99999999999',
'709': '1234',
'710': '-123word'})
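# A minimal sketch (ours, inferred from the assertions above, not the library's code):
# negative numeric answers become an 11-character string of nines, while non-numeric
# strings such as '-123word' are left untouched:
#     def parse_negative_values(data):
#         for qcode, value in data.items():
#             if value.startswith('-') and value.lstrip('-').replace('.', '', 1).isdigit():
#                 data[qcode] = '9' * 11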
def test_pck_transformer_preprocess_comments(self):
"""Tests 2 things. First, if every comment question (147 and all 146x) is present and 146 IS NOT in the data, then 146 is added.
Second, all of the comment questions are removed from the submission as they're not put into the pck file.
"""
survey = {'survey_id': '019'}
response = {'collection': {'instrument_id': '000'},
'data': {
"11": "03/07/2018",
"12": "01/10/2018",
"681": "123456.78",
"146a": "Yes",
"146b": "Start or end of a long term project",
"146c": "Site changes, for example, openings, closures, refurbishments or upgrades",
"146d": "End of accounting period or financial year",
"146e": "Normal movement for time of year",
"146f": "Change of business structure, merger, or takeover",
"146g": "One off or unusual investment",
"146h": "Introduction / removal of new legislation / incentive",
"146i": "Availability of credit",
"146j": "Overspend during the previous quarter",
"146k": "Other",
'147': "Yes",
'd12': 'Yes'}}
pck_transformer = PCKTransformer(survey, response)
self.assertEqual(pck_transformer.data, {
"11": "03/07/2018",
"12": "01/10/2018",
"681": "123456.78",
"146a": "Yes",
"146b": "Start or end of a long term project",
"146c": "Site changes, for example, openings, closures, refurbishments or upgrades",
"146d": "End of accounting period or financial year",
"146e": "Normal movement for time of year",
"146f": "Change of business structure, merger, or takeover",
"146g": "One off or unusual investment",
"146h": "Introduction / removal of new legislation / incentive",
"146i": "Availability of credit",
"146j": "Overspend during the previous quarter",
"146k": "Other",
'147': "Yes",
'd12': 'Yes'})
pck_transformer.preprocess_comments()
self.assertEqual(pck_transformer.data, {
"11": "03/07/2018",
"12": "01/10/2018",
"146": 1,
"681": "123456.78",
'd12': 'Yes'})
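# A minimal sketch (ours, inferred from the assertions above, not the library's code)
# of the comment preprocessing being tested:
#     COMMENT_QCODES = ['146' + letter for letter in 'abcdefghijk'] + ['147']
#     def preprocess_comments(data):
#         if all(q in data for q in COMMENT_QCODES) and '146' not in data:
#             data['146'] = 1
#         for q in COMMENT_QCODES:
#             data.pop(q, None)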
def test_pck_transformer_calculates_total_playback_qcas(self):
"""
For QCAS, downstream needs the calculated values for both acquisitions
and proceeds from disposals to be sent in the PCK.
"""
survey = {'survey_id': '019'}
response = {
"collection": {
"instrument_id": "0020"
},
"data": {
"11": "03/07/2018",
"12": "01/10/2018",
"146": "A lot of changes.",
# Disposals
"689": "499",
"696": "500",
"704": "12345.67",
"708": "12345500",
"710": "-499",
"712": "-12345.67",
# Construction
"681": "1000",
# Acquisitions
"688": "1500",
"695": "1500",
"703": "1500",
"707": "1500",
"709": "1500",
"711": "1500",
# Mineral
"697": "-1500",
"146a": "Yes",
"146b": "Start or end of a long term project",
"146c": "Site changes, for example, openings, closures, refurbishments or upgrades",
"146d": "End of accounting period or financial year",
"146e": "Normal movement for time of year",
"146f": "Change of business structure, merger, or takeover",
"146g": "One off or unusual investment",
"146h": "Introduction / removal of new legislation / incentive",
"146i": "Availability of credit",
"146j": "Overspend during the previous quarter",
"146k": "Other",
"d12": "Yes",
"d681": "Yes"
}
}
pck_transformer = PCKTransformer(survey, response)
pck_transformer.round_numeric_values()
assert pck_transformer.data['689'] == '0'
assert pck_transformer.data['696'] == '1'
assert pck_transformer.data['704'] == '12'
assert pck_transformer.data['708'] == '12346'
assert pck_transformer.data['710'] == '-0'
assert pck_transformer.data['712'] == '-12'
pck_transformer.calculate_total_playback()
# Total value of acquisitions questions for only machinery and equipments section
assert pck_transformer.data['714'] == '12'
# Total value of disposals questions for only machinery and equipments section
assert pck_transformer.data['715'] == '12347'
# Total value of all acquisitions questions
assert pck_transformer.data['692'] == '11'
# Total value of all disposals questions (same as '715' since the construction and mineral sections do not have a disposals question)
assert pck_transformer.data['693'] == '12347'
@staticmethod
def test_pck_transformer_calculates_total_playback_qss():
"""
For QSS (Stocks), downstream needs to calculate the start and end of period totals.
The fields that are added together are defined in a dictionary in the pck_transformer
"""
scenarios = ["0001", "0002"]
for form_type in scenarios:
survey = {'survey_id': "017"}
response = {
"collection": {
"instrument_id": form_type
},
"data": {
"15": "Yes",
"139": "7300",
"140": "7680",
"144": "2000",
"145": "2205",
"146": "A lot of changes.",
"149": "1800",
"150": "12205",
}
}
pck_transformer = PCKTransformer(survey, response)
pck_transformer.calculate_total_playback()
assert pck_transformer.data == {
'15': "Yes",
'65': '11100',
'66': '22090',
'139': '7300',
'140': '7680',
'144': '2000',
'145': '2205',
'146': 'A lot of changes.',
'149': '1800',
'150': '12205'
}
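# A minimal sketch (ours; the qcode groupings are read off the expected totals above
# rather than taken from the library) of the QSS start/end-of-period totals:
#     QSS_TOTALS = {'65': ['139', '144', '149'],   # start of period
#                   '66': ['140', '145', '150']}   # end of period
#     def calculate_total_playback(data):
#         for total_qcode, qcodes in QSS_TOTALS.items():
#             data[total_qcode] = str(sum(int(data[q]) for q in qcodes))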
def test_pck_transformer_total_playback_qss_missing_data_from_mapping(self):
"""
For QSS (Stocks), downstream needs to calculate the start and end of period totals.
It does this with a mapping in the PCK transformer. If a new form type is added but not
added to the mapping, or a 'start' or 'end' key is missing, then a KeyError is raised.
"""
scenarios = ["9999", "0033"]
for form_type in scenarios:
survey = {'survey_id': "017"}
response = {
"collection": {
"instrument_id": form_type
},
"data": {
"15": "Yes",
"139": "7300",
"140": "7680",
"144": "2000",
"145": "2205",
"146": "A lot of changes.",
"149": "1800",
"150": "12205",
}
}
pck_transformer = PCKTransformer(survey, response)
pck_transformer.qss_questions = {
"0033": {
"end": ['140', '145', '150']
}
}
with self.assertRaises(KeyError):
pck_transformer.calculate_total_playback()
@staticmethod
def test_pck_transformer_round_numeric_values_qpses():
"""
For QPSES, a number of values require rounding before being sent downstream. These should round up on a .5 answer.
"""
scenarios = ["160", "165", "169"]
for survey_id in scenarios:
survey = {'survey_id': survey_id}
response = {
"collection": {
"instrument_id": "0020"
},
"data": {
"60": 50.5,
"561": 50.3,
"562": 74.49,
"661": 80.1,
"662": 34.8,
"146": "A lot of changes.",
}
}
pck_transformer = PCKTransformer(survey, response)
pck_transformer.round_numeric_values()
assert pck_transformer.data == {
'60': '51',
'146': 'A lot of changes.',
'561': '50',
'562': '74',
'661': '80',
'662': '35'
}
def test_pck_transformer_round_numeric_values_qss(self):
"""
For QSS (Stocks), a number of values require rounding before being sent downstream. These should
be rounded to the nearest thousand.
For example:
- 12100 -> 12000
- 12500 -> 13000
- 12501 -> 13000
"""
scenarios = ["0001", "0002"]
for form_type in scenarios:
survey = {'survey_id': "017"}
response = {
"collection": {
"instrument_id": form_type
},
"data": {
"15": "Yes",
"65": "311500",
"66": "313103",
"139": "7300",
"140": "7680",
"144": "2000",
"145": "2205",
"146": "A lot of changes.",
"149": "1800",
"150": "12205",
}
}
pck_transformer = PCKTransformer(survey, response)
pck_transformer.round_numeric_values()
assert pck_transformer.data == {
'15': "Yes",
'65': '312',
'66': '313',
'139': '7',
'140': '8',
'144': '2',
'145': '2',
'146': 'A lot of changes.',
'149': '2',
'150': '12'
}
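# A minimal sketch (ours, not the library's code) matching the assertions above:
# values are rounded half-up to the nearest thousand and then expressed in thousands,
# e.g. '311500' -> 312000 -> '312' and '1800' -> 2000 -> '2':
#     from decimal import Decimal, ROUND_HALF_UP
#     def round_to_nearest_thousand(value):
#         rounded = Decimal(value).quantize(Decimal('1E3'), rounding=ROUND_HALF_UP)
#         return str(int(rounded) // 1000)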
|
the-stack_0_8306 | """Tests for the flux_led integration."""
from __future__ import annotations
import asyncio
from contextlib import contextmanager
import datetime
from typing import Callable
from unittest.mock import AsyncMock, MagicMock, patch
from flux_led import DeviceType
from flux_led.aio import AIOWifiLedBulb
from flux_led.const import (
COLOR_MODE_CCT as FLUX_COLOR_MODE_CCT,
COLOR_MODE_RGB as FLUX_COLOR_MODE_RGB,
)
from flux_led.models_db import MODEL_MAP
from flux_led.protocol import LEDENETRawState, PowerRestoreState, PowerRestoreStates
from flux_led.scanner import FluxLEDDiscovery
from homeassistant.components import dhcp
from homeassistant.core import HomeAssistant
MODULE = "homeassistant.components.flux_led"
MODULE_CONFIG_FLOW = "homeassistant.components.flux_led.config_flow"
IP_ADDRESS = "127.0.0.1"
MODEL_NUM_HEX = "0x35"
MODEL = "AZ120444"
MODEL_DESCRIPTION = "Bulb RGBCW"
MAC_ADDRESS = "aa:bb:cc:dd:ee:ff"
FLUX_MAC_ADDRESS = "aabbccddeeff"
SHORT_MAC_ADDRESS = "ddeeff"
DEFAULT_ENTRY_TITLE = f"{MODEL_DESCRIPTION} {SHORT_MAC_ADDRESS}"
DHCP_DISCOVERY = dhcp.DhcpServiceInfo(
hostname=MODEL,
ip=IP_ADDRESS,
macaddress=MAC_ADDRESS,
)
FLUX_DISCOVERY_PARTIAL = FluxLEDDiscovery(
ipaddr=IP_ADDRESS,
model=MODEL,
id=FLUX_MAC_ADDRESS,
model_num=None,
version_num=None,
firmware_date=None,
model_info=None,
model_description=None,
)
FLUX_DISCOVERY = FluxLEDDiscovery(
ipaddr=IP_ADDRESS,
model=MODEL,
id=FLUX_MAC_ADDRESS,
model_num=0x25,
version_num=0x04,
firmware_date=datetime.date(2021, 5, 5),
model_info=MODEL,
model_description=MODEL_DESCRIPTION,
remote_access_enabled=True,
remote_access_host="the.cloud",
remote_access_port=8816,
)
def _mocked_bulb() -> AIOWifiLedBulb:
bulb = MagicMock(auto_spec=AIOWifiLedBulb)
async def _save_setup_callback(callback: Callable) -> None:
bulb.data_receive_callback = callback
bulb.device_type = DeviceType.Bulb
bulb.requires_turn_on = True
bulb.async_setup = AsyncMock(side_effect=_save_setup_callback)
bulb.effect_list = ["some_effect"]
bulb.async_set_time = AsyncMock()
bulb.async_set_music_mode = AsyncMock()
bulb.async_set_custom_pattern = AsyncMock()
bulb.async_set_preset_pattern = AsyncMock()
bulb.async_set_effect = AsyncMock()
bulb.async_set_white_temp = AsyncMock()
bulb.async_set_brightness = AsyncMock()
bulb.async_stop = AsyncMock()
bulb.async_update = AsyncMock()
bulb.async_turn_off = AsyncMock()
bulb.async_turn_on = AsyncMock()
bulb.async_set_levels = AsyncMock()
bulb.async_set_zones = AsyncMock()
bulb.async_disable_remote_access = AsyncMock()
bulb.async_enable_remote_access = AsyncMock()
bulb.min_temp = 2700
bulb.max_temp = 6500
bulb.getRgb = MagicMock(return_value=[255, 0, 0])
bulb.getRgbw = MagicMock(return_value=[255, 0, 0, 50])
bulb.getRgbww = MagicMock(return_value=[255, 0, 0, 50, 0])
bulb.getRgbcw = MagicMock(return_value=[255, 0, 0, 0, 50])
bulb.rgb = (255, 0, 0)
bulb.rgb_unscaled = (255, 0, 0)
bulb.rgbw = (255, 0, 0, 50)
bulb.rgbww = (255, 0, 0, 50, 0)
bulb.rgbcw = (255, 0, 0, 0, 50)
bulb.color_temp = 2700
bulb.getWhiteTemperature = MagicMock(return_value=(2700, 128))
bulb.brightness = 128
bulb.model_num = 0x35
bulb.model_data = MODEL_MAP[0x35]
bulb.effect = None
bulb.speed = 50
bulb.model = "Bulb RGBCW (0x35)"
bulb.version_num = 8
bulb.speed_adjust_off = True
bulb.rgbwcapable = True
bulb.color_modes = {FLUX_COLOR_MODE_RGB, FLUX_COLOR_MODE_CCT}
bulb.color_mode = FLUX_COLOR_MODE_RGB
bulb.raw_state = LEDENETRawState(
0, 0x35, 0, 0x61, 0x5, 50, 255, 0, 0, 50, 8, 0, 0, 0
)
return bulb
def _mocked_switch() -> AIOWifiLedBulb:
switch = MagicMock(auto_spec=AIOWifiLedBulb)
async def _save_setup_callback(callback: Callable) -> None:
switch.data_receive_callback = callback
switch.device_type = DeviceType.Switch
switch.power_restore_states = PowerRestoreStates(
channel1=PowerRestoreState.LAST_STATE,
channel2=PowerRestoreState.LAST_STATE,
channel3=PowerRestoreState.LAST_STATE,
channel4=PowerRestoreState.LAST_STATE,
)
switch.requires_turn_on = True
switch.async_set_time = AsyncMock()
switch.async_reboot = AsyncMock()
switch.async_setup = AsyncMock(side_effect=_save_setup_callback)
switch.async_set_power_restore = AsyncMock()
switch.async_stop = AsyncMock()
switch.async_update = AsyncMock()
switch.async_turn_off = AsyncMock()
switch.async_turn_on = AsyncMock()
switch.model_num = 0x97
switch.model_data = MODEL_MAP[0x97]
switch.model = "Switch (0x97)"
switch.version_num = 0x97
switch.raw_state = LEDENETRawState(
0, 0x97, 0, 0x61, 0x97, 50, 255, 0, 0, 50, 8, 0, 0, 0
)
return switch
async def async_mock_device_turn_off(hass: HomeAssistant, bulb: AIOWifiLedBulb) -> None:
"""Mock the device being off."""
bulb.is_on = False
bulb.raw_state = bulb.raw_state._replace(power_state=0x24)
bulb.data_receive_callback()
await hass.async_block_till_done()
async def async_mock_device_turn_on(hass: HomeAssistant, bulb: AIOWifiLedBulb) -> None:
"""Mock the device being on."""
bulb.is_on = True
bulb.raw_state = bulb.raw_state._replace(power_state=0x23)
bulb.data_receive_callback()
await hass.async_block_till_done()
async def async_mock_effect_speed(
hass: HomeAssistant, bulb: AIOWifiLedBulb, effect: str, speed: int
) -> None:
"""Mock the device being on with an effect."""
bulb.speed = speed
bulb.effect = effect
bulb.data_receive_callback()
await hass.async_block_till_done()
def _patch_discovery(device=None, no_device=False):
async def _discovery(*args, **kwargs):
if no_device:
raise OSError
return [] if no_device else [device or FLUX_DISCOVERY]
@contextmanager
def _patcher():
with patch(
"homeassistant.components.flux_led.discovery.AIOBulbScanner.async_scan",
new=_discovery,
), patch(
"homeassistant.components.flux_led.discovery.AIOBulbScanner.getBulbInfo",
return_value=[] if no_device else [device or FLUX_DISCOVERY],
):
yield
return _patcher()
def _patch_wifibulb(device=None, no_device=False):
def _wifi_led_bulb(*args, **kwargs):
bulb = _mocked_bulb()
if no_device:
bulb.async_setup = AsyncMock(side_effect=asyncio.TimeoutError)
return bulb
return device if device else _mocked_bulb()
return patch("homeassistant.components.flux_led.AIOWifiLedBulb", new=_wifi_led_bulb)
|
the-stack_0_8307 | import argparse
import mmcv
import torch
import numpy as np
from mmedit.apis import init_model, restoration_inference
from mmedit.core import tensor2img
def parse_args():
parser = argparse.ArgumentParser(description='Restoration demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('img_path', help='path to input image file')
parser.add_argument('save_path', help='path to save restoration result')
parser.add_argument(
'--imshow', action='store_true', help='whether show image with opencv')
parser.add_argument('--device', type=int, default=0, help='CUDA device id')
args = parser.parse_args()
return args
def main():
args = parse_args()
model = init_model(
args.config, args.checkpoint, device=torch.device('cuda', args.device))
output = restoration_inference(model, args.img_path)
output = tensor2img(output)
# print(np.shape(output))
mmcv.imwrite(output, args.save_path)
if args.imshow:
mmcv.imshow(output, 'predicted restoration result')
if __name__ == '__main__':
main()
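# Example invocation (the config, checkpoint and image paths below are illustrative only):
#   python restoration_demo.py configs/restorer_config.py work_dirs/checkpoint.pth input.png output.png --device 0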
|
the-stack_0_8308 | import sys
import pytest
import os
import shutil
from unittest import mock
from sea import cli
def test_cmd_server(app):
sys.argv = "sea s".split()
with mock.patch("sea.cmds.Server", autospec=True) as mocked:
assert cli.main() == 0
mocked.return_value.run.assert_called_with()
def test_cmd_console(app):
sys.argv = "sea c".split()
mocked = mock.MagicMock()
with mock.patch.dict("sys.modules", {"IPython": mocked}):
assert cli.main() == 0
assert mocked.embed.called
def test_cmd_generate():
sys.argv = (
"sea g -I /path/to/protos -I /another/path/to/protos "
"hello.proto test.proto"
).split()
with mock.patch("grpc_tools.protoc.main", return_value=0) as mocked:
assert cli.main() == 0
import grpc_tools
well_known_path = os.path.join(
os.path.dirname(grpc_tools.__file__), "_proto"
)
proto_out = os.path.join(os.getcwd(), "protos")
cmd = [
"grpc_tools.protoc",
"--proto_path",
"/path/to/protos",
"--proto_path",
"/another/path/to/protos",
"--proto_path",
well_known_path,
"--python_out",
proto_out,
"--grpc_python_out",
proto_out,
"hello.proto",
"test.proto",
]
mocked.assert_called_with(cmd)
def test_cmd_new():
shutil.rmtree("tests/myproject", ignore_errors=True)
sys.argv = ("sea new tests/myproject" " --skip-git --skip-peewee").split()
assert cli.main() == 0
correct_code = """\
# import myproject_pb2
# import myproject_pb2_grpc
# from sea.servicer import ServicerMeta
# class MyprojectServicer(myproject_pb2_grpc.MyprojectServicer, metaclass=ServicerMeta):
# pass
"""
with open("./tests/myproject/app/servicers.py", "r") as f:
content = f.read()
from textwrap import dedent
assert content == dedent(correct_code).rstrip()
assert not os.path.exists("./tests/myproject/condfigs/default/peewee.py")
assert os.path.exists("./tests/myproject/app/async_tasks.py")
assert os.path.exists("./tests/myproject/app/buses.py")
correct_code = """\
sea
cachext
celery
raven
"""
with open("./tests/myproject/requirements.txt", "r") as f:
content = f.read()
assert content == dedent(correct_code)
shutil.rmtree("tests/myproject")
def test_cmd_job(app):
with mock.patch("os.getcwd", return_value=app.root_path):
sys.argv = "sea plusone -n 100".split()
assert cli.main() is None
assert app.config.get("NUMBER") == 101
sys.argv = "sea config_hello".split()
assert isinstance(cli.main(), cli.JobException)
class EntryPoint:
def load(self):
@cli.jobm.job("xyz")
def f2():
app.config["XYZ"] = "hello"
return f2
def new_entry_iter(name):
return [EntryPoint()]
with mock.patch("pkg_resources.iter_entry_points", new=new_entry_iter):
sys.argv = "sea xyz".split()
assert cli.main() is None
assert app.config.get("XYZ") == "hello"
def test_main():
sys.argv = "sea -h".split()
with pytest.raises(SystemExit):
cli.main()
# no arguments scenes
sys.argv = ["sea"]
with pytest.raises(SystemExit):
cli.main()
|
the-stack_0_8310 | import time
import argparse
import sys
import os
import os.path as osp
import numpy as np
import torch
import pandas as pd
from training.linear_regression import linear_regression
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--domain', type=str, default='uci') # 'uci'
parser.add_argument('--data', type=str, default='housing') # 'pks', 'cancer', 'housing', 'wine'
parser.add_argument('--method', type=str, default='mean')
parser.add_argument('--train_edge', type=float, default=0.7)
parser.add_argument('--train_y', type=float, default=0.7)
parser.add_argument('--log_dir', type=str, default='lry0')
parser.add_argument('--load_dir', type=str, default='0')
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
seed = args.seed
np.random.seed(seed)
torch.manual_seed(seed)
if args.domain == 'uci':
from uci.uci_data import load_data
data = load_data(args)
log_path = './{}/test/{}/{}_{}/'.format(args.domain,args.data,args.method,args.log_dir)
os.mkdir(log_path)
cmd_input = 'python ' + ' '.join(sys.argv) + '\n'
with open(osp.join(log_path, 'cmd_input.txt'), 'a') as f:
f.write(cmd_input)
load_path = './{}/test/{}/{}/'.format(args.domain,args.data,args.load_dir)
linear_regression(data, args, log_path, load_path)
if __name__ == '__main__':
main()
|
the-stack_0_8311 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Subscription(object):
def __init__(self, consumerGroupId=None, endPoint=None, messageInvisibleTimeInSeconds=None, subscriptionType=None, tags=None, dlqEnable=None, maxRetryTimes=None, createTime=None, lastUpdateTime=None, consumerNumbers=None):
"""
:param consumerGroupId: (Optional) consumerGroupId
:param endPoint: (Optional) endPoint
:param messageInvisibleTimeInSeconds: (Optional) messageInvisibleTimeInSeconds
:param subscriptionType: (Optional) subscriptionType
:param tags: (Optional) tags
:param dlqEnable: (Optional) whether the dead-letter queue is enabled
:param maxRetryTimes: (Optional) maximum number of retries
:param createTime: (Optional) creation time
:param lastUpdateTime: (Optional) last update time
:param consumerNumbers: (Optional) number of online consumers
"""
self.consumerGroupId = consumerGroupId
self.endPoint = endPoint
self.messageInvisibleTimeInSeconds = messageInvisibleTimeInSeconds
self.subscriptionType = subscriptionType
self.tags = tags
self.dlqEnable = dlqEnable
self.maxRetryTimes = maxRetryTimes
self.createTime = createTime
self.lastUpdateTime = lastUpdateTime
self.consumerNumbers = consumerNumbers
|
the-stack_0_8312 | #!/usr/bin/env python3
# encoding: utf-8
"""
A module for SSHing into servers.
Used for giving commands, uploading, and downloading files.
Todo:
* delete scratch files of a failed job: ssh nodeXX; rm scratch/dhdhdhd/job_number
"""
import datetime
import logging
import os
import re
import time
import paramiko
from arc.common import get_logger
from arc.exceptions import InputError, ServerError
from arc.settings import servers, check_status_command, submit_command, submit_filename, delete_command
logger = get_logger()
class SSHClient(object):
"""
This is a class for communicating with remote servers via SSH.
Args:
server (str): The server name as specified in ARCs's settings file under ``servers`` as a key.
Attributes:
server (str): The server name as specified in ARCs's settings file under ``servers`` as a key.
address (str): The server's address.
un (str): The username to use on the server.
key (str): A path to a file containing the RSA SSH private key to the server.
"""
def __init__(self, server=''):
if server == '':
raise ValueError('A server name must be specified')
if server not in servers.keys():
raise ValueError(f'Server name invalid. Currently defined servers are: {servers.keys()}')
self.server = server
self.address = servers[server]['address']
self.un = servers[server]['un']
self.key = servers[server]['key']
logging.getLogger("paramiko").setLevel(logging.WARNING)
def send_command_to_server(self, command, remote_path=''):
"""
Send commands to the server. `command` is either a string or a list of string commands to send.
If remote_path is not an empty string, the command will be executed in the directory path it points to.
Returns lists of stdout, stderr corresponding to the commands sent.
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.load_system_host_keys(filename=self.key)
try:
ssh.connect(hostname=self.address, username=self.un)
except:
return '', 'paramiko failed to connect'
if isinstance(command, list):
command = '; '.join(command)
if remote_path != '':
# execute command in remote_path directory.
# Since each `.exec_command()` is a single session, `cd` has to be added to all commands.
command = f'cd {remote_path}; {command}'
try:
_, stdout, stderr = ssh.exec_command(command)
except: # SSHException: Timeout opening channel.
try: # try again
_, stdout, stderr = ssh.exec_command(command)
except:
return '', 'ssh timed-out after two trials'
stdout = stdout.readlines()
stderr = stderr.readlines()
ssh.close()
return stdout, stderr
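# Example usage (illustrative; 'server1' must be a key in ARC's `servers` settings dict
# and the remote path must exist on that machine):
#     ssh = SSHClient('server1')
#     stdout, stderr = ssh.send_command_to_server(['pwd', 'ls'], remote_path='/home/user/runs')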
def upload_file(self, remote_file_path, local_file_path='', file_string=''):
"""
Upload `local_file_path` or the contents of `file_string` to `remote_file_path`.
Either `file_string` or `local_file_path` must be given.
"""
if local_file_path and not os.path.isfile(local_file_path):
raise InputError(f'Cannot upload a non-existing file. '
f'Check why file in path {local_file_path} is missing.')
sftp, ssh = self.connect()
i, max_times_to_try = 1, 30
success = False
sleep_time = 10 # seconds
while i < max_times_to_try:
try:
write_file(sftp, remote_file_path, local_file_path, file_string)
except IOError:
logger.error(f'Could not upload file {local_file_path} to {self.server}!')
logger.error(f'ARC is sleeping for {sleep_time * i} seconds before re-trying, '
f'please check your connectivity.')
logger.info('ZZZZZ..... ZZZZZ.....')
time.sleep(sleep_time * i) # in seconds
else:
success = True
i = 1000
i += 1
if not success:
raise ServerError(f'Could not write file {remote_file_path} on {self.server}. '
f'Tried {max_times_to_try} times.')
sftp.close()
ssh.close()
def download_file(self, remote_file_path, local_file_path):
"""
Download a file from `remote_file_path` to `local_file_path`.
"""
i, max_times_to_try = 1, 30
success = False
sleep_time = 10 # seconds
while i < 30:
self._download_file(remote_file_path, local_file_path)
if os.path.isfile(local_file_path):
success = True
i = 1000
else:
logger.error(f'Could not download file {remote_file_path} from {self.server}!')
logger.error(f'ARC is sleeping for {sleep_time * i} seconds before re-trying, '
f'please check your connectivity.')
logger.info('ZZZZZ..... ZZZZZ.....')
time.sleep(sleep_time * i) # in seconds
i += 1
if not success:
raise ServerError(f'Could not download file {remote_file_path} from {self.server}. '
f'Tried {max_times_to_try} times.')
def _download_file(self, remote_file_path, local_file_path):
"""
Download a file from `remote_file_path` to `local_file_path`.
"""
sftp, ssh = self.connect()
try:
sftp.get(remotepath=remote_file_path, localpath=local_file_path)
except IOError:
logger.debug(f'Got an IOError when trying to download file {remote_file_path} from {self.server}')
sftp.close()
ssh.close()
def read_remote_file(self, remote_path, filename):
"""
Read a remote file. `remote_path` is the remote path (required), a `filename` is also required.
Returns the file's content.
"""
sftp, ssh = self.connect()
full_path = os.path.join(remote_path, filename)
with sftp.open(full_path, 'r') as f_remote:
content = f_remote.readlines()
sftp.close()
ssh.close()
return content
def check_job_status(self, job_id):
"""
A wrapper around _check_job_status() that retries on connection errors.
"""
i = 1
sleep_time = 1 # minutes
while i < 30:
result = self._check_job_status(job_id)
if result == 'connection error':
logger.error(f'ARC is sleeping for {sleep_time * i} min before re-trying, '
f'please check your connectivity.')
logger.info('ZZZZZ..... ZZZZZ.....')
time.sleep(sleep_time * i * 60) # in seconds
else:
i = 1000
i += 1
return result
def _check_job_status(self, job_id):
"""
Possible statuses: `before_submission`, `running`, `errored on node xx`, `done`
Status line formats:
pharos: '540420 0.45326 xq1340b user_name r 10/26/2018 11:08:30 [email protected]'
rmg: '14428 debug xq1371m2 user_name R 50-04:04:46 1 node06'
"""
cmd = check_status_command[servers[self.server]['cluster_soft']] + ' -u $USER'
stdout, stderr = self.send_command_to_server(cmd)
if stderr:
logger.info('\n\n')
logger.error(f'Could not check status of job {job_id} due to {stderr}')
return 'connection error'
return check_job_status_in_stdout(job_id=job_id, stdout=stdout, server=self.server)
def delete_job(self, job_id):
"""
Deletes a running job
"""
cmd = delete_command[servers[self.server]['cluster_soft']] + ' ' + str(job_id)
self.send_command_to_server(cmd)
def check_running_jobs_ids(self):
"""
Return a list of ``int`` representing job IDs of all jobs submitted by the user on a server
"""
running_jobs_ids = list()
cmd = check_status_command[servers[self.server]['cluster_soft']] + ' -u $USER'
stdout = self.send_command_to_server(cmd)[0]
for i, status_line in enumerate(stdout):
if (servers[self.server]['cluster_soft'].lower() == 'slurm' and i > 0)\
or (servers[self.server]['cluster_soft'].lower() == 'oge' and i > 1):
running_jobs_ids.append(int(status_line.split()[0]))
return running_jobs_ids
def submit_job(self, remote_path):
"""Submit a job"""
job_status = ''
job_id = 0
cmd = submit_command[servers[self.server]['cluster_soft']] + ' '\
+ submit_filename[servers[self.server]['cluster_soft']]
stdout, stderr = self.send_command_to_server(cmd, remote_path)
if len(stderr) > 0 or len(stdout) == 0:
logger.warning(f'Got stderr when submitting job:\n{stderr}')
job_status = 'errored'
for line in stderr:
if 'Requested node configuration is not available' in line:
logger.warning(f'User may be requesting more resources than are available. Please check server '
f'settings, such as cpus and memory, in ARC/arc/settings.py')
elif 'submitted' in stdout[0].lower():
job_status = 'running'
if servers[self.server]['cluster_soft'].lower() == 'oge':
job_id = int(stdout[0].split()[2])
elif servers[self.server]['cluster_soft'].lower() == 'slurm':
job_id = int(stdout[0].split()[3])
else:
raise ValueError(f'Unrecognized cluster software {servers[self.server]["cluster_soft"]}')
return job_status, job_id
def connect(self):
"""A helper function for calling self.try_connecting until successful"""
times_tried = 0
max_times_to_try = 1440 # continue trying for 24 hrs...
interval = 60 # wait 60 sec between trials
while times_tried < max_times_to_try:
times_tried += 1
try:
sftp, ssh = self.try_connecting()
except Exception as e:
if not times_tried % 10:
logger.info(f'Tried connecting to {self.server} {times_tried} times with no success...'
f'\nGot: {e}')
else:
print(f'Tried connecting to {self.server} {times_tried} times with no success...'
f'\nGot: {e}')
else:
logger.debug(f'Successfully connected to {self.server} at the {times_tried} trial.')
return sftp, ssh
time.sleep(interval)
raise ServerError(f'Could not connect to server {self.server} even after {times_tried} trials.')
def try_connecting(self):
"""A helper function for connecting via paramiko, returns the `sftp` and `ssh` objects"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.load_system_host_keys(filename=self.key)
try:
ssh.connect(hostname=self.address, username=self.un)
except:
# This sometimes gives "SSHException: Error reading SSH protocol banner[Error 104] Connection reset by peer"
# Try again:
ssh.connect(hostname=self.address, username=self.un)
sftp = ssh.open_sftp()
return sftp, ssh
def get_last_modified_time(self, remote_file_path):
"""returns the last modified time of `remote_file_path` in a datetime format"""
sftp, ssh = self.connect()
try:
timestamp = sftp.stat(remote_file_path).st_mtime
except IOError:
return None
sftp.close()
ssh.close()
return datetime.datetime.fromtimestamp(timestamp)
def write_file(sftp, remote_file_path, local_file_path='', file_string=''):
"""
Write a file. If `file_string` is given, write it as the content of the file.
Else, if `local_file_path` is given, copy it to `remote_file_path`.
Args:
sftp (paramiko's SFTP): The SFTP object.
remote_file_path (str): The path to write into on the remote server.
local_file_path (str, optional): A local file path to be copied into the remote location.
file_string (str): The file content to be copied and saved as the remote file.
"""
with sftp.open(remote_file_path, 'w') as f_remote:
if file_string:
f_remote.write(file_string)
elif local_file_path:
# with open(local_file_path, 'r') as f_local:
# f_remote.write(f_local.readlines())
sftp.put(localpath=local_file_path, remotepath=remote_file_path)
else:
raise ValueError('Could not upload file to server. Either `file_string` or `local_file_path`'
' must be specified')
def check_job_status_in_stdout(job_id, stdout, server):
"""
A helper function for checking job status.
Args:
job_id (int): the job ID recognized by the server.
stdout (list, str): The output of a queue status check.
server (str): The server name.
Returns:
str: The job status on the server ('running', 'done', or 'errored').
"""
if not isinstance(stdout, list):
stdout = stdout.splitlines()
for status_line in stdout:
if str(job_id) in status_line:
break
else:
return 'done'
status = status_line.split()[4]
if status.lower() in ['r', 'qw', 't']:
return 'running'
else:
if servers[server]['cluster_soft'].lower() == 'oge':
if '.cluster' in status_line:
try:
return 'errored on node ' + status_line.split()[-1].split('@')[1].split('.')[0][-2:]
except IndexError:
return 'errored'
else:
return 'errored'
elif servers[server]['cluster_soft'].lower() == 'slurm':
return 'errored on node ' + status_line.split()[-1][-2:]
else:
raise ValueError(f'Unknown cluster software {servers[server]["cluster_soft"]}')
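# Worked example (illustrative values) in the spirit of the OGE-style status line quoted
# in the _check_job_status docstring above; column 5 is 'r', so the job is reported as running:
#     >>> line = '540420 0.45326 xq1340b user_name r 10/26/2018 11:08:30 queue1@node18.cluster'
#     >>> check_job_status_in_stdout(job_id=540420, stdout=[line], server='pharos')
#     'running'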
def delete_all_arc_jobs(server_list, jobs=None):
"""
Delete all ARC-spawned jobs (with job name starting with `a` and a digit) from :list:servers
(`servers` could also be a string of one server name)
Make sure you know what you're doing, so unrelated jobs won't be deleted...
Useful when terminating ARC while some (ghost) jobs are still running.
Args:
server_list (list): List of servers to delete ARC jobs from.
jobs (Optional[List[str]]): Specific ARC job IDs to delete.
"""
if isinstance(server_list, str):
server_list = [server_list]
for server in server_list:
jobs_message = f'{len(jobs)}' if jobs is not None else 'all'
print(f'\nDeleting {jobs_message} ARC jobs from {server}...')
cmd = check_status_command[servers[server]['cluster_soft']] + ' -u $USER'
ssh = SSHClient(server)
stdout = ssh.send_command_to_server(cmd)[0]
for status_line in stdout:
s = re.search(r' a\d+', status_line)
if s is not None:
job_id = s.group()[1:]
if jobs is None or job_id in jobs:
if servers[server]['cluster_soft'].lower() == 'slurm':
server_job_id = status_line.split()[0]
ssh.delete_job(server_job_id)
print(f'deleted job {job_id} ({server_job_id} on server)')
elif servers[server]['cluster_soft'].lower() == 'oge':
ssh.delete_job(job_id)
print(f'deleted job {job_id}')
if server_list:
print('\ndone.')
|
the-stack_0_8313 | from django import forms
from django.contrib.auth.models import User
import json
from django.http import response
from django.shortcuts import render,redirect,get_object_or_404
from .models import Following, Image, Like,Profile,Comment
from django.contrib.auth.forms import UserCreationForm
from .forms import CreateUserForm
from django.contrib import messages
from django.contrib.auth import authenticate,login,logout as dj_login
from django.urls import reverse
from django.contrib.auth import login as dj_login
from django.contrib.auth.decorators import login_required
from .forms import UpdateuserForm,UpdateprofileForm,ImageForm,CommentForm
# Create your views here.
def registeruser(request):
title = 'Register - instagram'
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Account Created Successfully!. Check out our Email later :)')
return redirect('login')
else:
form = CreateUserForm
context = {
'title':title,
'form':form,
}
return render(request, 'register.html', context)
def loginpage(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request,username=username,password=password)
if user is not None:
login(request,user)
return redirect(reverse('welcome'))
else:
messages.info(request, 'Username or Password mismatch')
context = {}
return render(request, 'login.html',context)
@login_required(login_url='login')
def logout(request):
return redirect(reverse('login'))
@login_required(login_url='login')
def welcome(request):
photos=Image.objects.all()
user=request.user
context= { 'photos':photos,'user':user}
return render (request,'welcome.html',context)
@login_required
def profile(request):
if request.method == 'POST':
u_form = UpdateuserForm(request.POST, instance=request.user)
p_form = UpdateprofileForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Your account has been updated!')
return redirect('profile') # Redirect back to profile page
else:
u_form = UpdateuserForm(instance=request.user)
p_form = UpdateprofileForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form
}
return render(request, 'profile.html', context)
def search_results(request):
if 'photos' in request.GET and request.GET["photos"]:
search_term = request.GET.get("photos")
searched_profiles = Profile.search_profile(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"photos": searched_profiles})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
@login_required(login_url='login/')
def like(request):
user=request.user
if request.method=='POST':
image_id=request.POST.get('image_id')
image_obj=Image.objects.get(id=image_id)
if user in image_obj.liked.all():
image_obj.liked.remove(user)
else:
image_obj.liked.add(user)
like,created=Like.objects.get_or_create(user=user,image_id=image_id)
if not created:
if like.value=='Like':
like.value='Unlike'
else:
like.value='Like'
like.save()
return redirect('welcome')
def uploadImage(request):
if request.method == "POST":
form=ImageForm(data=request.POST,files=request.FILES)
if form.is_valid():
form.save()
obj=form.instance
return redirect('welcome')
else:
form=ImageForm()
img=Image.objects.all()
return render(request,"index.html",{"form":form})
def viewPhoto(request,pk=int):
photo=Image.objects.get(id=pk)
return render(request,'photo.html',{'photo':photo})
def follow(request,username):
obj=Following.objects.all()
main_user=request.user
to_follow=User.objects.get(username=username)
following=Following.objects.filter(user=main_user,followed=to_follow)
is_following=True if following else False
if is_following:
Following.unfollow(main_user,to_follow)
is_following=False
else:
Following.follow(main_user,to_follow)
is_following=True
resp={'following':is_following}
response=json.dumps(resp)
return render(request,'profile.html',response,context_type='application/json',username=username)
def post_detail(request, slug):
post = get_object_or_404(Image, slug=slug)
comments = post.comments.filter(active=True)
new_comment = None
# Comment posted
if request.method == 'POST':
comment_form = CommentForm(data=request.POST)
if comment_form.is_valid():
# Create Comment object but don't save to database yet
new_comment = comment_form.save(commit=False)
# Assign the current post to the comment
new_comment.post = post
# Save the comment to the database
new_comment.save()
else:
comment_form = CommentForm()
return render(request, 'comment.html', {'post': post,
'comments': comments,
'new_comment': new_comment,
'comment_form': comment_form})
|
the-stack_0_8315 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import socket
from subprocess import run
import re
import time
IP_ADDRESS = '169.254.227.203'
PORT = 50010
np.set_printoptions(suppress=True)
# readColor_path = 'readColor2.txt'
# def GetColorData():
# color_data = ""
# with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# s.bind((IP_ADDRESS, PORT))
# s.listen(1)
# print('Start program...')
# conn, addr = s.accept()
# print(f'Connection from {addr} has been established!')
# color_data = conn.recv(4096).decode()
# print(color_data)
# conn.send("I'm Received.".encode('UTF-8'))
# conn.close()
# return color_data
def String_to_3d_Ndarray(string_data):
string_data = string_data.strip()
string_list = string_data.split('\n')
string_part_list = list(map(lambda s: s.split(':'), string_list))
string_color_list = []
for part in string_part_list:
string_color_list.append(list(map(lambda s: s.split(','), part)))
color_ndarray = np.asarray(np.array(string_color_list), dtype=float)
# ndarray[start:stop:step]
color_3d_ndarray = np.round(color_ndarray.reshape((-1, 3)), decimals=2)
return color_3d_ndarray
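# Example (illustrative): two colour readings arrive as comma-separated RGB triples
# joined by ':' within a line, and come back as a rounded (n, 3) float array:
#     >>> String_to_3d_Ndarray("240,255,241:40,173,146\n")
#     array([[240., 255., 241.],
#            [ 40., 173., 146.]])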
def Clustering(RGB_data):
n_cluster = 6
center = np.array(
[[240, 255, 241], # white
[40, 173, 146], # green
[226, 255, 255], # yellow
[33, 127, 131], # blue
[183, 255, 240], # orange
[126, 52, 13]] # red
)
def ShowResult():
markers = ['+', '*', '<', 'o', '1', 's', 'd']
color = ['r', 'b', 'g', 'c', 'm', 'y', 'k']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel("r", size = 14, color = "r")
ax.set_ylabel("g", size = 14, color = "g")
ax.set_zlabel("b", size = 14, color = "b")
for i in range(n_cluster):
p = RGB_data[cube_color_ndarray.ravel() == i, :]
ax.scatter3D(p[:, 0], p[:, 1], p[:, 2], marker = markers[i], color = color[i])
plt.show()
try:
cube_color_ndarray = kmeans(RGB_data, n_cluster, center, 200).reshape((-1, 3))
except Exception as e:
print(e)
return None
ShowResult()
return cube_color_ndarray
def Identify_Color_of_UFR(cube_color, color_of_DBL_dict):
inverse_side = {'U':'D', 'F':'B', 'R':'L', 'D':'U', 'B':'F', 'L':'R'}
color_of_UFR_dict = {}
for i in range(8):
if i == 4:
continue
side = {'U':True, 'F':True, 'R':True}
for DBL_key, DBL_color in color_of_DBL_dict.items():
if DBL_color in cube_color[i]:
cube_color[i].remove(DBL_color)
side[inverse_side[DBL_key]] = False
if len(cube_color[i]) == 1:
True_Keys = [k for k,v in side.items() if v]
color_of_UFR_dict[True_Keys[0]] = cube_color[i][0]
return color_of_UFR_dict
def Identify_State(cube_color, color_dict):
part_sets = {
frozenset(['U', 'L', 'B']): 0,
frozenset(['U', 'B', 'R']): 1,
frozenset(['U', 'R', 'F']): 2,
frozenset(['U', 'F', 'L']): 3,
frozenset(['D', 'B', 'L']): 4,
frozenset(['D', 'R', 'B']): 5,
frozenset(['D', 'F', 'R']): 6,
frozenset(['D', 'L', 'F']): 7}
cp = [-1 for i in range(8)]
co = [-1 for i in range(8)]
try:
for i in range(len(cube_color)):
part_color_set = set()
for color in cube_color[i]:
part_color_set.add([k for k, v in color_dict.items() if v == color][0])
cp[i] = part_sets[frozenset(part_color_set)]
if color_dict['U'] in cube_color[i]:
co[i] = cube_color[i].index(color_dict['U'])
elif color_dict['D'] in cube_color[i]:
co[i] = cube_color[i].index(color_dict['D'])
except Exception as e:
print(e)
return None, None
print(cp)
print(co)
return cp, co
def UFR_to_DYZ(UFR_ways, now_po): #now_po = now_per_ori
    def most_ori(per_ori):  # compute the most significant bit of the orientation
        if per_ori[0] % 2 == 0:  # if the permutation is even
            return ((per_ori[1] & 0b10) >> 1 ^ per_ori[1] & 0b01) & 0b01  # MSB = lowest bit xor middle bit
        else:  # if the permutation is odd
            return ~((per_ori[1] & 0b10) >> 1 ^ per_ori[1] & 0b01) & 0b01  # MSB = not(lowest bit xor middle bit)
    def doY(now_per_ori):  # compute per_ori after a Y rotation
next_per = now_per_ori[0] // 2 * 2 + 1 - now_per_ori[0] % 2
next_ori = now_per_ori[1] >> 1 | ~(now_per_ori[1] & 0b01) << 1 & 0b10
return (next_per, next_ori)
    def doZ(now_per_ori):  # compute per_ori after a Z rotation
next_per = (now_per_ori[0] + 3) % 6
next_ori = ~(most_ori(now_per_ori) << 1) & 0b10 | now_per_ori[1] & 0b01
return (next_per, next_ori)
def direction_change(pre_per_ori, rotate_side):
if rotate_side == "U":
base_per = 0
elif rotate_side == "F":
base_per = 4
else:
base_per = 2
if pre_per_ori[0] == base_per or pre_per_ori[0] == base_per + 1:
if most_ori(pre_per_ori) == 1:
return ("", pre_per_ori)
else:
return ("Z2 ", doZ(doZ(pre_per_ori)))
elif pre_per_ori[0] == (base_per+3) % 6 or pre_per_ori[0] == (base_per+4) % 6:
temp_per_ori = doZ(pre_per_ori)
if most_ori(temp_per_ori) == 1:
return ("Z ", temp_per_ori)
else:
return ("Y2 Z ", doZ(doY(doY(pre_per_ori))))
else:
temp_per_ori = doZ(doY(pre_per_ori))
if most_ori(temp_per_ori) == 1:
return ("Y Z ", temp_per_ori)
else:
return ("Y3 Z ", doZ(doY(doY(doY(pre_per_ori)))))
def add_reset_move(pre_per_ori):
        # Call this after performing a D move.
return ("Z Y3 R ", doY(doY(doY(doZ(pre_per_ori)))))
po = now_po
DYZ_ways = ""
for move_name in UFR_ways.split(","):
if move_name == "":
return ""
else:
if re.match(r"^U.?$", move_name):
direction_change_info = direction_change(po,"U")
DYZ_ways += direction_change_info[0]
po = direction_change_info[1]
elif re.match(r"^F.?$", move_name):
direction_change_info = direction_change(po,"F")
DYZ_ways += direction_change_info[0]
po = direction_change_info[1]
else:
direction_change_info = direction_change(po,"R")
DYZ_ways += direction_change_info[0]
po = direction_change_info[1]
if re.match(r"^.2$", move_name):
DYZ_ways += "D2 "
elif re.match(r"^.3$", move_name):
DYZ_ways += "D3 "
else:
DYZ_ways += "D "
direction_change_info = add_reset_move(po)
DYZ_ways += direction_change_info[0]
po = direction_change_info[1]
return DYZ_ways[:-7].strip()
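# Illustrative call (a sketch; the move string and starting state below are
# assumptions, not values from a real solve): given a comma-separated move
# sequence in U/F/R notation and the hardware starting at permutation 0 with
# orientation 0b00,
#   UFR_to_DYZ("R,U2,F3", (0, 0b00))
# returns the equivalent space-separated sequence of D/Y/Z moves for the solver.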
from kmeans_initSet import kmeans
import copy
way_path = 'solve_way.txt'
def main():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # Prepare the connection.
s.bind((IP_ADDRESS, PORT))
s.listen(1)
print('Start program...')
        # Receive the color data from the EV3.
conn, addr = s.accept()
print(f'Connection from {addr} has been established!')
color_data_string = conn.recv(4096).decode()
RGB_data = String_to_3d_Ndarray(color_data_string)
print(RGB_data)
        # Run the clustering.
cube_color_ndarray = Clustering(RGB_data)
if cube_color_ndarray is None:
conn.send("Error Happend!".encode('UTF-8'))
conn.close()
return
print(cube_color_ndarray)
        # Identify which color corresponds to which face.
cube_color = cube_color_ndarray.tolist()
DLB_keys = ['D', 'B', 'L']
color_of_DBL_dict = dict(zip(DLB_keys, cube_color[4]))
try:
color_of_UFR_dict = Identify_Color_of_UFR(copy.deepcopy(cube_color), color_of_DBL_dict)
except Exception as e:
print(e)
conn.send("Error Happend!".encode('UTF-8'))
conn.close()
return
color_of_Side_dict = {**color_of_UFR_dict, **color_of_DBL_dict}
print(color_of_Side_dict)
        # Identify cp and co (corner permutation and orientation).
cp, co = Identify_State(cube_color, color_of_Side_dict)
if cp is None:
conn.send("Error Happend!".encode('UTF-8'))
conn.close()
return
cp_str = ','.join(map(str, cp))
co_str = ','.join(map(str, co))
        # Compute the solution.
start_time = time.perf_counter()
try:
run(['./rcSolver.exe', cp_str, co_str])
            # run() waits until the subprocess finishes.
except KeyboardInterrupt:
print('interrupted!')
print('Solving Time: {:.4f}sec.'.format(time.perf_counter() - start_time))
conn.send("Error Happend!".encode('UTF-8'))
conn.close()
return
except Exception as e:
print(e)
print('Solving Time: {:.4f}sec.'.format(time.perf_counter() - start_time))
conn.send("Error Happend!".encode('UTF-8'))
conn.close()
return
print('Solving Time: {:.4f}sec.'.format(time.perf_counter() - start_time))
with open(way_path) as f:
UFR_ways = f.read()
if UFR_ways == 'Cannot_Solve!':
conn.send("Error Happend!".encode('UTF-8'))
conn.close()
return
        # Convert the moves into a form the Rubik's cube solver hardware can execute.
DYZ_ways = UFR_to_DYZ(UFR_ways, (0, 0b00))
        # Send the solution to the EV3.
conn.send(DYZ_ways.encode('UTF-8'))
conn.close()
print('finish 1 turn.')
if __name__ == '__main__':
while True:
judgment = input('Ready. Start Connection? [y/n]')
if judgment != 'y':
break
main() |
the-stack_0_8316 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Utilities for managing paths in Spack.
TODO: this is really part of spack.config. Consolidate it.
"""
import contextlib
import getpass
import os
import re
import subprocess
import tempfile
import llnl.util.tty as tty
from llnl.util.lang import memoized
import spack.paths
import spack.util.spack_yaml as syaml
__all__ = [
'substitute_config_variables',
'substitute_path_variables',
'canonicalize_path']
# Substitutions to perform
replacements = {
'spack': spack.paths.prefix,
'user': getpass.getuser(),
'tempdir': tempfile.gettempdir(),
'user_cache_path': spack.paths.user_cache_path,
}
# This is intended to be longer than the part of the install path
# spack generates from the root path we give it. Included in the
# estimate:
#
# os-arch -> 30
# compiler -> 30
# package name -> 50 (longest is currently 47 characters)
# version -> 20
# hash -> 32
# buffer -> 138
# ---------------------
# total -> 300
SPACK_MAX_INSTALL_PATH_LENGTH = 300
#: Padded paths comprise directories with this name (or some prefix of it).
#: It starts with two underscores to make it unlikely that prefix matches would
#: include some other component of the installation path.
SPACK_PATH_PADDING_CHARS = '__spack_path_placeholder__'
@memoized
def get_system_path_max():
# Choose a conservative default
sys_max_path_length = 256
try:
path_max_proc = subprocess.Popen(['getconf', 'PATH_MAX', '/'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
proc_output = str(path_max_proc.communicate()[0].decode())
sys_max_path_length = int(proc_output)
except (ValueError, subprocess.CalledProcessError, OSError):
tty.msg('Unable to find system max path length, using: {0}'.format(
sys_max_path_length))
return sys_max_path_length
def substitute_config_variables(path):
"""Substitute placeholders into paths.
Spack allows paths in configs to have some placeholders, as follows:
- $env The active Spack environment.
- $spack The Spack instance's prefix
- $tempdir Default temporary directory returned by tempfile.gettempdir()
- $user The current user's username
- $user_cache_path The user cache directory (~/.spack, unless overridden)
These are substituted case-insensitively into the path, and users can
use either ``$var`` or ``${var}`` syntax for the variables. $env is only
replaced if there is an active environment, and should only be used in
environment yaml files.
"""
import spack.environment as ev # break circular
env = ev.active_environment()
if env:
replacements.update({'env': env.path})
else:
# If a previous invocation added env, remove it
replacements.pop('env', None)
# Look up replacements
def repl(match):
m = match.group(0).strip('${}')
return replacements.get(m.lower(), match.group(0))
# Replace $var or ${var}.
return re.sub(r'(\$\w+\b|\$\{\w+\})', repl, path)
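# Illustrative example (a sketch; the expanded values are assumptions that
# depend on the local system and the Spack prefix): with the default
# replacements above,
#   substitute_config_variables('$tempdir/$user/stage')
# would expand to something like '/tmp/<username>/stage' on a typical Linux
# host, and '${user_cache_path}/cache' expands to a path under the user cache
# directory.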
def substitute_path_variables(path):
"""Substitute config vars, expand environment vars, expand user home."""
path = substitute_config_variables(path)
path = os.path.expandvars(path)
path = os.path.expanduser(path)
return path
def _get_padding_string(length):
spack_path_padding_size = len(SPACK_PATH_PADDING_CHARS)
num_reps = int(length / (spack_path_padding_size + 1))
extra_chars = length % (spack_path_padding_size + 1)
reps_list = [SPACK_PATH_PADDING_CHARS for i in range(num_reps)]
reps_list.append(SPACK_PATH_PADDING_CHARS[:extra_chars])
return os.path.sep.join(reps_list)
def add_padding(path, length):
"""Add padding subdirectories to path until total is length characters
Returns the padded path. If path is length - 1 or more characters long,
returns path. If path is length - 1 characters, warns that it is not
padding to length
Assumes path does not have a trailing path separator"""
padding_length = length - len(path)
if padding_length == 1:
# The only 1 character addition we can make to a path is `/`
# Spack internally runs normpath, so `foo/` will be reduced to `foo`
# Even if we removed this behavior from Spack, the user could normalize
# the path, removing the additional `/`.
# Because we can't expect one character of padding to show up in the
# resulting binaries, we warn the user and do not pad by a single char
tty.warn("Cannot pad path by exactly one character.")
if padding_length <= 0:
return path
# we subtract 1 from the padding_length to account for the path separator
# coming from os.path.join below
padding = _get_padding_string(padding_length - 1)
return os.path.join(path, padding)
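# Illustrative example (lengths chosen only for this sketch): on a POSIX system,
#   add_padding('/opt/spack', 40)
# pads the 10-character prefix with placeholder directories and returns
#   '/opt/spack/__spack_path_placeholder__/__'
# where the final component is a prefix of SPACK_PATH_PADDING_CHARS.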
def canonicalize_path(path):
"""Same as substitute_path_variables, but also take absolute path."""
# Get file in which path was written in case we need to make it absolute
# relative to that path.
filename = None
if isinstance(path, syaml.syaml_str):
filename = os.path.dirname(path._start_mark.name)
assert path._start_mark.name == path._end_mark.name
path = substitute_path_variables(path)
if not os.path.isabs(path):
if filename:
path = os.path.join(filename, path)
else:
path = os.path.abspath(path)
tty.debug("Using current working directory as base for abspath")
return os.path.normpath(path)
def longest_prefix_re(string, capture=True):
"""Return a regular expression that matches a the longest possible prefix of string.
i.e., if the input string is ``the_quick_brown_fox``, then::
m = re.compile(longest_prefix('the_quick_brown_fox'))
m.match('the_').group(1) == 'the_'
m.match('the_quick').group(1) == 'the_quick'
m.match('the_quick_brown_fox').group(1) == 'the_quick_brown_fox'
m.match('the_xquick_brown_fox').group(1) == 'the_'
m.match('the_quickx_brown_fox').group(1) == 'the_quick'
"""
if len(string) < 2:
return string
return "(%s%s%s?)" % (
"" if capture else "?:",
string[0],
longest_prefix_re(string[1:], capture=False)
)
#: regex cache for padding_filter function
_filter_re = None
def padding_filter(string):
"""Filter used to reduce output from path padding in log output.
This turns paths like this:
/foo/bar/__spack_path_placeholder__/__spack_path_placeholder__/...
Into paths like this:
/foo/bar/[padded-to-512-chars]/...
Where ``padded-to-512-chars`` indicates that the prefix was padded with
placeholders until it hit 512 characters. The actual value of this number
depends on what the `install_tree``'s ``padded_length`` is configured to.
For a path to match and be filtered, the placeholder must appear in its
entirety at least one time. e.g., "/spack/" would not be filtered, but
"/__spack_path_placeholder__/spack/" would be.
"""
global _filter_re
pad = spack.util.path.SPACK_PATH_PADDING_CHARS
if not _filter_re:
longest_prefix = longest_prefix_re(pad)
regex = (
r"((?:/[^/\s]*)*?)" # zero or more leading non-whitespace path components
r"(/{pad})+" # the padding string repeated one or more times
r"(/{longest_prefix})?(?=/)" # trailing prefix of padding as path component
)
regex = regex.replace("/", os.sep)
regex = regex.format(pad=pad, longest_prefix=longest_prefix)
_filter_re = re.compile(regex)
def replacer(match):
return "%s%s[padded-to-%d-chars]" % (
match.group(1),
os.sep,
len(match.group(0))
)
return _filter_re.sub(replacer, string)
@contextlib.contextmanager
def filter_padding():
"""Context manager to safely disable path padding in all Spack output.
This is needed because Spack's debug output gets extremely long when we use a
long padded installation path.
"""
padding = spack.config.get("config:install_tree:padded_length", None)
if padding:
        # filter out all padding from the install command output
with tty.output_filter(padding_filter):
yield
else:
yield # no-op: don't filter unless padding is actually enabled
|
the-stack_0_8317 | ######################################################################
# Author: John Martin TODO: Change this to your names
# Username: MartinJoh TODO: Change this to your usernames
#
# Assignment: A08: UPC Bar Codes
#
# Purpose: Determine how to do some basic operations on lists
#
######################################################################
# Acknowledgements:
#
# None: Original work
# licensed under a Creative Commons
# Attribution-Noncommercial-Share Alike 3.0 United States License.
####################################################################################
import turtle
#guard=[]
#upc=[]
# draw( ,hieight )
#if color == 0
# t.color ("black")
#t.begin_fill
#draw rectangle
#t.end_fill()
def is_valid_input(barcode):
    '''
    :param barcode: the user-entered UPC string
    :return: True if the input is exactly 12 characters long, False otherwise
    '''
    if len(barcode) == 12: # checks that the input is the proper length
return True
else:
return False
def convert_to_list (input_code):
'''
:param input_code:
:return: Turns users input into a list
'''
converted_input= list(str(input_code)) #Turns user input into list that is mutuable
#print(converted_input)
return converted_input
def is_valid_modulo(upc_list):
    '''
    Compute the UPC-style check digit: the even digits and the odd digits are
    summed separately, the odd-digit sum is weighted by 3, and the check digit
    is 10 minus the total modulo 10. The check digit is appended to upc_list
    (when the remainder is zero, the raw total is returned instead).
    :param upc_list: list of the 12 entered digits (as strings)
    :return: upc_list with the modulo check number appended to the end
    '''
even_sum = 0
odd_sum = 0
#modulo=upc_list[0:11]
for num in upc_list:
if int(num) % 2 == 0: #takes the remainder of num divided by 2
even_sum += int(num)
#print("even_sum=" + str(even_sum))
else:
odd_sum += int(num)
#print("odd sum = " + str(odd_sum))
total_sum= even_sum + 3*odd_sum #adds even sum to the odd sum multiplied by 3
#print("total sum = " + str(total_sum))
equation_number = int(total_sum) % 10
#print("equation num = " + str(equation_number))
if equation_number > 0:
check_number = 10 - equation_number
print(str(check_number))
upc_list.append(check_number)
else:
return total_sum
return upc_list
def convert_binary(upc_barcode_list):
"""
:param upc_barcode_list:
:return:
"""
translatorleft = {"0":"0001101", #Makes a class for each of the numbers binary equals
"1":"0011001",
"2":"0010011",
"3":"0111101",
"4":"0100011",
"5":"0110001",
"6":"0101111",
"7":"0111011",
"8":"0110111",
"9":"0001011"}
translatorright = {"0":"1110010",
"1":"1100110",
"2":"1101100",
"3":"1000010",
"4":"1011100",
"5":"1001110",
"6":"1010000",
"7":"1000100",
"8":"1001000",
"9":"1110100"}
guardbar= "101"
centerbar= "01010"
binaryleft = ""
binaryright= ""
for x in upc_barcode_list[0:7]:
if x in translatorleft.keys():
binaryleft += str(translatorleft[x])
#print(str(binaryleft))
else:
return "?"
for i in upc_barcode_list[7:]:
if i in translatorright.keys():
binaryright += str(translatorright[i])
print (guardbar + binaryleft + centerbar + binaryright + guardbar)
return guardbar + binaryleft + centerbar + binaryright + guardbar #combines binary to form binary version of barcode
def binary_image(binary):
'''
:param binary:
:return: none
'''
guard=[]
upc=list(binary)
binny = turtle.Turtle()
binny.speed(0)
binny.pensize(4)
binny.shape("arrow")
binny.penup()
binny.setpos(-200,150)
binny.pendown()
(x,y)= binny.pos()
for i,e in enumerate(upc):
if e == "1":
binny.goto(x,y-200)
binny.goto(x,y)
x+=4
binny.penup()
binny.goto(x,y)
binny.pendown()
def make_numbers(input_code):
'''
:param input_code:
:return: None
'''
bin = turtle.Turtle()
bin.speed(0)
bin.pensize(4)
bin.shape("arrow")
bin.penup()
bin.right(90)
bin.forward(100)
bin.right(90)
bin.forward(50)
bin.pendown()
bin.write(input_code,False,"center",("Arial",20,"normal"))
def main():
wn = turtle.Screen()
input_code = input("Enter a 12 digit code [0-9]: ")
while not is_valid_input(input_code):
input_code = input("Invalid number. Enter a 12 digit code [0-9]: ")
upc_list = convert_to_list(input_code)
upc_barcode_list = is_valid_modulo(upc_list)
upc_binary=convert_binary(upc_barcode_list)
binary_image(upc_binary)
make_numbers(input_code)
wn.exitonclick()
if __name__ == "__main__":
main()
|
the-stack_0_8318 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Woochain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool re-org scenarios.
Test re-org scenarios with a mempool that contains transactions
that spend (directly or indirectly) coinbase transactions.
"""
from test_framework.test_framework import WoochainTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(WoochainTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-checkmempool"]] * 2
alert_filename = None # Set by setup_network
def run_test(self):
# Start with a 200 block chain
assert_equal(self.nodes[0].getblockcount(), 200)
# Mine four blocks. After this, nodes[0] blocks
# 101, 102, and 103 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
# Use invalidatblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)
spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)
spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)
# Create a transaction which is time-locked to two blocks in the future
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
# Set the time lock
timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
# This will raise an exception because the timelock transaction is too immature to spend
assert_raises_rpc_error(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Time-locked transaction is still too immature to spend
assert_raises_rpc_error(-26,'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
# Time-locked transaction can now be spent
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
# Time-locked transaction is now too immature and has been removed from the mempool
# spend_103_1 has been re-orged out of the chain and is back in the mempool
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
the-stack_0_8319 | # Copyright (c) 2017, Fernando Freire <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
import types
from . import AWSObject, AWSProperty
from .awslambda import Environment, VPCConfig, validate_memory_size
from .dynamodb import ProvisionedThroughput
from .validators import positive_integer
assert types # silence pyflakes
def primary_key_type_validator(x):
valid_types = ["String", "Number", "Binary"]
if x not in valid_types:
raise ValueError("KeyType must be one of: %s" % ", ".join(valid_types))
return x
def policy_validator(x):
if isinstance(x, types.StringTypes):
return x
elif isinstance(x, types.ListType):
return x
else:
raise ValueError("Policies must refer to a managed policy, a list of "
+ "policies, an IAM policy document, or a list of IAM"
+ " policy documents")
class Function(AWSObject):
resource_type = "AWS::Serverless::Function"
props = {
'Handler': (basestring, True),
'Runtime': (basestring, True),
'CodeUri': (basestring, True),
'Description': (basestring, False),
'MemorySize': (validate_memory_size, False),
'Timeout': (positive_integer, False),
'Role': (basestring, False),
'Policies': (policy_validator, False),
'Environment': (Environment, False),
'VpcConfig': (VPCConfig, False),
'Events': (dict, False)
}
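# A minimal usage sketch (the resource title and property values below are
# invented for illustration): a serverless Function is declared like any other
# troposphere AWSObject and added to a Template, e.g.
#
#   from troposphere import Template
#   t = Template()
#   t.add_resource(Function(
#       'MyFunction',
#       Handler='index.handler',
#       Runtime='python3.6',
#       CodeUri='s3://my-bucket/code.zip',
#   ))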
class Api(AWSObject):
resource_type = "AWS::Serverless::Api"
props = {
'StageName': (basestring, True),
'DefinitionUri': (basestring, True),
'CacheClusterEnabled': (bool, False),
'CacheClusterSize': (basestring, False),
'Variables': (dict, False)
}
class PrimaryKey(AWSProperty):
props = {
'Name': (basestring, False),
'Type': (primary_key_type_validator, False)
}
class SimpleTable(AWSObject):
resource_type = "AWS::Serverless::SimpleTable"
props = {
'PrimaryKey': (PrimaryKey, False),
'ProvisionedThroughput': (ProvisionedThroughput, False)
}
class S3Event(AWSObject):
resource_type = 'S3'
props = {
'Bucket': (basestring, True),
'Events': (list, True),
'Filter': (basestring, False)
}
class SNSEvent(AWSObject):
resource_type = 'SNS'
props = {
'Topic': (basestring, True)
}
def starting_position_validator(x):
valid_types = ['TRIM_HORIZON', 'LATEST']
if x not in valid_types:
raise ValueError(
"StartingPosition must be one of: %s"
% ", ".join(valid_types)
)
return x
class KinesisEvent(AWSObject):
resource_type = 'Kinesis'
props = {
'Stream': (basestring, True),
'StartingPosition': (starting_position_validator, True),
'BatchSize': (positive_integer, False)
}
class DynamoDBEvent(AWSObject):
resource_type = 'DynamoDB'
props = {
'Stream': (basestring, True),
'StartingPosition': (starting_position_validator, True),
'BatchSize': (positive_integer, False)
}
class ApiEvent(AWSObject):
resource_type = 'Api'
props = {
'Path': (basestring, True),
'Method': (basestring, True),
'RestApiId': (basestring, False)
}
class ScheduleEvent(AWSObject):
resource_type = 'Schedule'
props = {
'Schedule': (basestring, True),
'Input': (basestring, False)
}
class CloudWatchEvent(AWSObject):
resource_type = 'CloudWatchEvent'
props = {
'Pattern': (dict, True),
'Input': (basestring, False),
'InputPath': (basestring, False)
}
class IoTRuleEvent(AWSObject):
resource_type = 'IoTRule'
props = {
'Sql': (basestring, True),
'AwsIotSqlVersion': (basestring, False)
}
class AlexaSkillEvent(AWSObject):
resource_type = 'AlexaSkill'
props = {}
|
the-stack_0_8320 | from PyQt5 import QtCore
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QFrame, QComboBox, QVBoxLayout, QListWidget, QCompleter, QPushButton, QLabel
from PyQt5.QtGui import QBrush, QColor
from .elements.list_item import ListItem
class TrainingListWidget(QWidget):
def __init__(self, *args, **kwargs):
super(TrainingListWidget, self).__init__(*args, **kwargs)
self.entries = []
layout = QVBoxLayout()
self.setLayout(layout)
self.setStyleSheet("background-color: rgb(54,197,254); color: rgb(2,4,40)")
# Select Bot
selectBot = QWidget()
selectBotLayout = QHBoxLayout()
selectBot.setLayout(selectBotLayout)
self.selectBotBar = QComboBox()
self.selectBotBar.addItem("General")
self.selectBotBar.addItem("Gustav")
self.addBotButton = QPushButton('+')
self.addBotButton.setFixedWidth(30)
self.deleteBotButton = QPushButton('x')
self.deleteBotButton.setFixedWidth(30)
self.selectBotBar.currentIndexChanged.connect(self.dropDownListener)
selectBotLayout.addWidget(self.selectBotBar)
selectBotLayout.addWidget(self.addBotButton)
selectBotLayout.addWidget(self.deleteBotButton)
# Category List
self.resultList = QListWidget() # write own class with entries
# Add remove Categories
self.addButton = QPushButton("+", self)
self.addButton.setToolTip('Add a new training entry')
self.addButton.setStyleSheet("background-color: rgb(54,197,254); color: rgb(2,4,40)")
self.removeButton = QPushButton("-", self)
self.removeButton.setToolTip('Remove current entry')
self.removeButton.setStyleSheet("background-color: rgb(54,197,254); color: rgb(2,4,40)")
buttonWidget = QWidget()
buttonLayout = QHBoxLayout()
buttonLayout.addWidget(self.addButton)
buttonLayout.addWidget(self.removeButton)
buttonLayout.setContentsMargins(0, 0, 0, 0)
buttonWidget.setLayout(buttonLayout)
buttonWidget.setStyleSheet("background-color: rgb(2,4,40)")
layout.addWidget(selectBot)
layout.addWidget(self.resultList)
layout.addWidget(buttonWidget)
def display(self, entries, currentEntry):
self.entries = entries
self.listItems = []
self.resultList.clear()
i=0
for entry in entries:
listItem = ListItem(entry.name)
if(not entry.isSaved):
brush = QBrush(QColor(255,255,0))
listItem.setForeground(brush)
self.listItems.append(listItem)
self.resultList.addItem(listItem)
self.selectEntry(currentEntry)
def selectEntry(self, selectedEntry):
selectedId = id(selectedEntry)
i = 0
for entry in self.entries:
if(id(entry) == selectedId):
self.resultList.setCurrentRow(i)
i += 1
def convertListitem(self, listItem):
i = 0
for item in self.listItems:
if(item is listItem):
return self.entries[i]
i += 1
def dropDownListener(self, index):
if index == 0:
self.deleteBotButton.setEnabled(False)
else:
self.deleteBotButton.setEnabled(True)
def updateBots(self, bots):
self.selectBotBar.clear()
for bot in bots:
self.selectBotBar.addItem(bot.name) |
the-stack_0_8321 | import chess
from .types import *
from .player import Player
from .game import Game, LocalGame, RemoteGame
from .history import GameHistory
def play_local_game(white_player: Player, black_player: Player, seconds_per_player: float = 900) -> Tuple[
Optional[Color], Optional[WinReason], GameHistory]:
"""
Plays a game between the two players passed in. Uses :class:`LocalGame` to run the game, and just calls
:func:`play_turn` until the game is over: ::
while not game.is_over():
play_turn(game, player)
:param white_player: The white :class:`Player`.
:param black_player: The black :class:`Player`.
:return: The results of the game, also passed to each player via :meth:`Player.handle_game_end`.
"""
players = [black_player, white_player]
game = LocalGame(seconds_per_player=seconds_per_player)
white_player.handle_game_start(chess.WHITE, game.board.copy())
black_player.handle_game_start(chess.BLACK, game.board.copy())
game.start()
while not game.is_over():
play_turn(game, players[game.turn])
game.end()
winner_color = game.get_winner_color()
win_reason = game.get_win_reason()
game_history = game.get_game_history()
white_player.handle_game_end(winner_color, win_reason, game_history)
black_player.handle_game_end(winner_color, win_reason, game_history)
return winner_color, win_reason, game_history
def play_remote_game(name, game_id, player: Player):
game = RemoteGame(game_id)
color = game.get_player_color(name)
player.handle_game_start(color, game.get_starting_board())
game.start()
while not game.is_over():
game.wait_for_turn(name)
play_turn(game, player)
# TODO get win reason here
player.handle_game_end(game.get_winner_color(), game.get_game_history())
def play_turn(game: Game, player: Player):
"""
Coordinates playing a turn for `player` in `game`. Does the following sequentially:
#. :func:`notify_opponent_move_results`
#. :func:`play_sense`
#. :func:`play_move`
#. :meth:`Game.end_turn`
:param game: The :class:`Game` that `player` is playing in.
:param player: The :class:`Player` whose turn it is.
"""
sense_actions = game.sense_actions()
move_actions = game.move_actions()
notify_opponent_move_results(game, player)
play_sense(game, player, sense_actions, move_actions)
play_move(game, player, move_actions)
game.end_turn()
def notify_opponent_move_results(game: Game, player: Player):
"""
Passes the opponents move results to the player. Does the following sequentially:
#. Get the results of the opponents move using :meth:`Game.opponent_move_results`.
#. Give the results to the player using :meth:`Player.handle_opponent_move_result`.
:param game: The :class:`Game` that `player` is playing in.
:param player: The :class:`Player` whose turn it is.
"""
opt_capture_square = game.opponent_move_results()
player.handle_opponent_move_result(opt_capture_square is not None, opt_capture_square)
def play_sense(game: Game, player: Player, sense_actions: List[Square], move_actions: List[chess.Move]):
"""
Runs the sense phase for `player` in `game`. Does the following sequentially:
#. Get the sensing action using :meth:`Player.choose_sense`.
#. Apply the sense action using :meth:`Game.sense`.
#. Give the result of the sense action to player using :meth:`Player.handle_sense_result`.
:param game: The :class:`Game` that `player` is playing in.
:param player: The :class:`Player` whose turn it is.
:param sense_actions: The possible sense actions for `player`.
:param move_actions: The possible move actions for `player`.
"""
sense = player.choose_sense(sense_actions, move_actions, game.get_seconds_left())
sense_result = game.sense(sense)
player.handle_sense_result(sense_result)
def play_move(game: Game, player: Player, move_actions: List[chess.Move]):
"""
Runs the move phase for `player` in `game`. Does the following sequentially:
#. Get the moving action using :meth:`Player.choose_move`.
#. Apply the moving action using :meth:`Game.move`.
    #. Give the result of the move action to the player using :meth:`Player.handle_move_result`.
:param game: The :class:`Game` that `player` is playing in.
:param player: The :class:`Player` whose turn it is.
:param move_actions: The possible move actions for `player`.
"""
move = player.choose_move(move_actions, game.get_seconds_left())
requested_move, taken_move, opt_enemy_capture_square = game.move(move)
player.handle_move_result(requested_move, taken_move,
opt_enemy_capture_square is not None, opt_enemy_capture_square)
|
the-stack_0_8323 | # -*- coding: utf-8 -*-
"""Parsing of Netflix Website"""
from __future__ import unicode_literals
import json
import traceback
from re import compile as recompile, DOTALL, sub
from collections import OrderedDict
import resources.lib.common as common
from resources.lib.globals import g
from .paths import resolve_refs
from .exceptions import (InvalidProfilesError, InvalidAuthURLError,
InvalidMembershipStatusError, WebsiteParsingError)
PAGE_ITEMS = [
'models/userInfo/data/authURL',
'models/userInfo/data/guid',
'models/userInfo/data/countryOfSignup',
'models/userInfo/data/membershipStatus',
'models/serverDefs/data/BUILD_IDENTIFIER',
'models/serverDefs/data/ICHNAEA_ROOT',
'models/serverDefs/data/API_ROOT',
'models/playerModel/data/config/ui/initParams/apiUrl',
'models/esnGeneratorModel/data/esn',
'models/memberContext/data/geo/preferredLocale'
]
JSON_REGEX = r'netflix\.%s\s*=\s*(.*?);\s*</script>'
AVATAR_SUBPATH = ['images', 'byWidth', '320', 'value']
@common.time_execution(immediate=True)
def extract_session_data(content):
"""
Call all the parsers we need to extract all
the session relevant data from the HTML page
"""
common.debug('Extracting session data...')
profiles, active_profile = extract_profiles(
extract_json(content, 'falcorCache'))
user_data = extract_userdata(content)
if user_data.get('preferredLocale'):
g.PERSISTENT_STORAGE['locale_id'] = user_data.get('preferredLocale').get('id','en-US')
if user_data.get('membershipStatus') != 'CURRENT_MEMBER':
common.debug(user_data)
# Ignore this for now
# raise InvalidMembershipStatusError(user_data.get('membershipStatus'))
return {
'profiles': profiles,
'active_profile': active_profile,
'user_data': user_data,
'esn': generate_esn(user_data),
'api_data': _parse_api_data(user_data)
}
@common.time_execution(immediate=True)
def extract_profiles(falkor_cache):
"""Extract profile information from Netflix website"""
profiles = {}
active_profile = None
try:
profiles_list = OrderedDict(resolve_refs(falkor_cache['profilesList'], falkor_cache))
for guid, profile in profiles_list.items():
common.debug('Parsing profile {}'.format(guid))
avatar_url = _get_avatar(falkor_cache, profile)
profile = profile['summary']['value']
profile['avatar'] = avatar_url
if profile.get('isActive'):
active_profile = guid
profiles[list(profiles_list.keys()).index(guid)] = (guid, profile)
except Exception:
common.error(traceback.format_exc())
common.error('Falkor cache: {}'.format(falkor_cache))
raise InvalidProfilesError
return profiles, active_profile
def _get_avatar(falkor_cache, profile):
try:
profile['avatar']['value'].extend(AVATAR_SUBPATH)
return common.get_path(profile['avatar']['value'], falkor_cache)
except KeyError:
common.warn('Cannot find avatar for profile {guid}'
.format(guid=profile['summary']['value']['guid']))
return ''
@common.time_execution(immediate=True)
def extract_userdata(content):
"""Extract essential userdata from the reactContext of the webpage"""
common.debug('Extracting userdata from webpage')
user_data = {}
react_context = extract_json(content, 'reactContext')
for path in ([path_item for path_item in path.split('/')]
for path in PAGE_ITEMS):
try:
user_data.update({path[-1]: common.get_path(path, react_context)})
common.debug('Extracted {}'.format(path))
except (AttributeError, KeyError):
common.debug('Could not extract {}'.format(path))
return assert_valid_auth_url(user_data)
def _parse_api_data(user_data):
return {api_item: user_data[api_item]
for api_item in (
item.split('/')[-1]
for item in PAGE_ITEMS
if ('serverDefs' in item) or ('initParams/apiUrl' in item))}
def assert_valid_auth_url(user_data):
"""Raise an exception if user_data does not contain a valid authURL"""
if len(user_data.get('authURL', '')) != 42:
raise InvalidAuthURLError('authURL is invalid')
return user_data
def generate_esn(user_data):
"""Generate an ESN if on android or return the one from user_data"""
import subprocess
try:
manufacturer = subprocess.check_output(
['/system/bin/getprop', 'ro.product.manufacturer'])
if manufacturer:
esn = ('NFANDROID1-PRV-'
if subprocess.check_output(
['/system/bin/getprop', 'ro.build.characteristics']
).strip(' \t\n\r') != 'tv'
else 'NFANDROID2-PRV-')
inp = subprocess.check_output(
['/system/bin/getprop', 'ro.nrdp.modelgroup']).strip(' \t\n\r')
if not inp:
esn += 'T-L3-'
else:
esn += inp + '-'
esn += '{:=<5}'.format(manufacturer.strip(' \t\n\r').upper())
inp = subprocess.check_output(
['/system/bin/getprop', 'ro.product.model'])
esn += inp.strip(' \t\n\r').replace(' ', '=').upper()
esn = sub(r'[^A-Za-z0-9=-]', '=', esn)
common.debug('Android generated ESN:' + esn)
return esn
except OSError:
pass
return user_data.get('esn', '')
@common.time_execution(immediate=True)
def extract_json(content, name):
"""Extract json from netflix content page"""
common.debug('Extracting {} JSON'.format(name))
json_str = None
try:
json_array = recompile(JSON_REGEX % name, DOTALL).findall(content)
json_str = json_array[0]
json_str = json_str.replace('\"', '\\"') # Escape double-quotes
json_str = json_str.replace('\\s', '\\\\s') # Escape \s
json_str = json_str.replace('\\n', '\\\\n') # Escape line feed
json_str = json_str.replace('\\t', '\\\\t') # Escape tab
json_str = json_str.decode('unicode_escape') # finally decoding...
return json.loads(json_str)
except Exception:
if json_str:
common.error('JSON string trying to load: {}'.format(json_str))
common.error(traceback.format_exc())
raise WebsiteParsingError('Unable to extract {}'.format(name))
|
the-stack_0_8325 | from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon, QPixmap, QTextCursor, QCursor, QFont, QColor
from PyQt5.QtSql import QSqlTableModel, QSqlDatabase
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QObject, QTimer, Qt, QModelIndex, qInstallMessageHandler, QSize, QRect
import os
import time
import json
import webbrowser
from functools import partial
from manage_code import send_code
from database_management import submission_management, query_management, manage_local_ids
from init_client import handle_config
config = handle_config.read_config_json()
class ui_widgets():
#############################################################################
# Handle UI for various button presses
var = {}
def problems_ui(self):
main_layout = QVBoxLayout()
heading = QLabel('Problems')
heading.setObjectName('main_screen_heading')
column = 0
row = 0
number_of_buttons = 1
self.scrollArea = QScrollArea(self)
self.scrollArea.setWidgetResizable(True)
self.scrollAreaWidgetContents = QWidget()
self.scrollAreaWidgetContents.setObjectName('myobject')
problems_layout = QGridLayout(self.scrollAreaWidgetContents)
# problems_layout = QGridLayout()
# problems_layout.setSpacing(20)
while(number_of_buttons <= config["No_of_Problems"]):
# for i in range(config["No_of_Problems"]):
# problem_name = eval(config["Problems"]['Problem ' + str(number_of_buttons)])
problem_name = config["Problems"]['Problem ' + str(number_of_buttons)]
problem_name = problem_name[0]
ui_widgets.var['Problem {}'.format(number_of_buttons)] = QPushButton('Problem '+str(number_of_buttons) + '\n' + problem_name,self)
ui_widgets.var['Problem {}'.format(number_of_buttons)].setObjectName('problem_buttons')
ui_widgets.var['Problem {}'.format(number_of_buttons)].setFixedSize(500, 200)
ui_widgets.var['Problem {}'.format(number_of_buttons)].clicked.connect(partial(self.show_problem, number_of_buttons, self.data_changed_flag))
problems_layout.addWidget(ui_widgets.var['Problem {}'.format(number_of_buttons)],row,column)
if(column==1):
row+=1;
column=0;
else:
column+=1;
number_of_buttons+=1;
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
# self.scrollArea.setFixedHeight(700)
self.scrollArea.setObjectName('myscrollarea')
problems_layout.setObjectName('mygrid')
# problems_widget = QWidget()
# problems_widget.setLayout(problems_layout)
main_layout.addWidget(heading)
main_layout.addWidget(self.scrollArea)
main_layout.setStretch(0, 5)
main_layout.setStretch(1, 95)
# main_layout.addWidget(problems_widget)
main_layout.addStretch(5)
main = QWidget()
main.setLayout(main_layout)
main.setObjectName("main_screen")
return main
def submissions_ui(self):
heading = QLabel('My Submissions')
heading.setObjectName('main_screen_heading')
view_submission_button = QPushButton('View Submission')
view_submission_button.setFixedSize(200, 50)
view_submission_button.clicked.connect(lambda: self.view_submission(submission_table.selectionModel().currentIndex().row()))
view_submission_button.setObjectName('submit')
submission_model = self.submission_models(self.db, 'my_submissions')
submission_model.setHeaderData(0, Qt.Horizontal, 'Run Id')
submission_model.setHeaderData(1, Qt.Horizontal, 'Verdict')
submission_model.setHeaderData(2, Qt.Horizontal, 'Language')
submission_model.setHeaderData(3, Qt.Horizontal, 'Problem Number')
submission_model.setHeaderData(4, Qt.Horizontal, 'Time')
submission_table = self.generate_view(submission_model)
submission_table.doubleClicked.connect(
lambda: self.view_submission(
submission_table.selectionModel().currentIndex().row()
))
head_layout = QHBoxLayout()
head_layout.addWidget(heading)
# head_layout.addWidget(view_submission_button, alignment=Qt.AlignRight)
head_widget = QWidget()
head_widget.setLayout(head_layout)
main_layout = QVBoxLayout()
main_layout.addWidget(head_widget)
main_layout.addWidget(submission_table)
main_layout.setStretch(0, 5)
main_layout.setStretch(1, 95)
main = QWidget()
main.setLayout(main_layout)
main.setObjectName("main_screen")
main.show()
return main, submission_model
def submit_ui(self):
heading = QLabel('Submit Solution')
heading.setObjectName('main_screen_heading')
self.drop_down = QHBoxLayout()
ui_widgets.language_box = QComboBox()
ui_widgets.language_box.setGeometry(QRect(10, 10, 491, 31))
ui_widgets.language_box.setFixedWidth(200)
ui_widgets.language_box.setFixedHeight(40)
ui_widgets.language_box.setObjectName(("language_box_content"))
allowed_problems = eval(config["Languages"])
for i in allowed_problems:
ui_widgets.language_box.addItem(i)
ui_widgets.problem_box = QComboBox()
ui_widgets.problem_box.setGeometry(QRect(10, 10, 491, 31))
ui_widgets.problem_box.setFixedWidth(250)
ui_widgets.problem_box.setFixedHeight(40)
ui_widgets.problem_box.setObjectName("language_box_content")
for i in range(config["No_of_Problems"]):
ui_widgets.problem_box.addItem("Problem "+str(i+1))
self.submit_solution = QPushButton('Submit', self)
self.submit_solution.setObjectName('submit')
self.submit_solution.setFixedSize(200, 50)
self.submit_solution.clicked.connect(lambda:ui_widgets.submit_call(self, self.data_changed_flag,ui_widgets))
self.drop_down.addWidget(ui_widgets.language_box)
self.drop_down.addWidget(ui_widgets.problem_box)
self.drop_down.addStretch(4)
self.drop_down.addWidget(self.submit_solution)
self.drop_down.setSpacing(10)
self.drop_widget = QWidget()
self.drop_widget.setContentsMargins(10, 0, 0, 0)
self.drop_widget.setLayout(self.drop_down)
ui_widgets.text_area = QPlainTextEdit()
# ui_widgets.text_area.setFixedHeight(650)
ui_widgets.text_area.setObjectName('text_area_content')
ui_widgets.text_area.setPlaceholderText('Paste your code here')
main_layout = QVBoxLayout()
main_layout.addWidget(heading)
main_layout.addWidget(self.drop_widget)
main_layout.addWidget(ui_widgets.text_area)
main_layout.setStretch(0, 5)
main_layout.setStretch(1, 5)
main_layout.setStretch(2, 90)
main = QWidget()
main.setLayout(main_layout)
main.setObjectName("main_screen")
return main
def query_ui(self):
heading = QLabel('Query')
heading.setObjectName('main_screen_heading')
view_query_button = QPushButton('View Query')
view_query_button.setFixedSize(200, 50)
view_query_button.clicked.connect(lambda: self.view_reply(query_table.selectionModel().currentIndex().row()))
view_query_button.setObjectName('submit')
query_model = self.manage_models(self.db, 'my_query')
query_model.setHeaderData(0, Qt.Horizontal, 'Query')
query_model.setHeaderData(1, Qt.Horizontal, 'Response')
query_table = self.generate_view(query_model)
query_table.doubleClicked.connect(
lambda: self.view_reply(
query_table.selectionModel().currentIndex().row()
))
head_layout = QHBoxLayout()
head_layout.addWidget(heading)
# head_layout.addWidget(view_query_button, alignment=Qt.AlignRight)
head_widget = QWidget()
head_widget.setLayout(head_layout)
ui_widgets.ask_query = QLineEdit(self)
ui_widgets.ask_query.setFixedWidth(500)
ui_widgets.ask_query.setFixedHeight(50)
ui_widgets.ask_query.setPlaceholderText(' Problem 1 : Your Query ')
ui_widgets.ask_query.setToolTip(" Send the query in this format only.\n Else it might get ignored.")
ui_widgets.ask_query.setObjectName('ask_query')
self.send_query = QPushButton('Send', self)
self.send_query.setFixedSize(200, 50)
self.send_query.clicked.connect(lambda:ui_widgets.sending(self,self.data_changed_flag))
self.send_query.setObjectName('ask')
main_layout = QVBoxLayout()
main_layout.addWidget(head_widget)
main_layout.addWidget(query_table)
main_layout.addWidget(ui_widgets.ask_query, alignment=Qt.AlignLeft)
main_layout.addWidget(self.send_query, alignment=Qt.AlignLeft)
main_layout.setStretch(0, 5)
main_layout.setStretch(1, 80)
main_layout.setStretch(2, 10)
main_layout.setStretch(3, 5)
main = QWidget()
main.setLayout(main_layout)
main.setObjectName("main_screen")
return main, query_model
def leaderboard_ui(self):
try:
main_layout = QVBoxLayout()
heading = QLabel('Leaderboard')
heading.setObjectName('main_screen_heading')
score_model = self.score_models(self.db, 'score_table')
score_model.setHeaderData(0, Qt.Horizontal, 'RANK')
score_model.setHeaderData(1, Qt.Horizontal, 'TEAM_NAME')
score_model.setHeaderData(2, Qt.Horizontal, 'SCORE')
score_model.setHeaderData(3, Qt.Horizontal, 'PROBLEMS_SOLVED')
score_model.setHeaderData(4, Qt.Horizontal, 'TIME_TAKEN')
score_model.setQuery(
"SELECT rank() over(ORDER BY score DESC,time_taken ASC) as RANK,[TEAM_NAME],[SCORE],[PROBLEMS_SOLVED],[TIME_TAKEN] FROM score_table")
score_table = self.generate_view(score_model)
head_layout = QHBoxLayout()
head_layout.addWidget(heading)
# head_layout.addWidget(view_submission_button, alignment=Qt.AlignRight)
head_widget = QWidget()
head_widget.setLayout(head_layout)
main_layout = QVBoxLayout()
main_layout.addWidget(head_widget)
main_layout.addWidget(score_table)
main_layout.setStretch(0, 5)
main_layout.setStretch(1, 95)
main = QWidget()
main.setLayout(main_layout)
main.setObjectName("main_screen")
main.show()
return main, score_model
except Exception as Error:
print('[ LEADERBOARD ] ' + str(Error))
def about_ui(self):
head1 = QLabel('Made with <3 by Team Bitwise')
head1.setObjectName('about_screen_heading')
head2 = QLabel('Guess what? The BitsOJ project is open source!')
head2.setObjectName('main_screen_content')
head3 = QLabel('Contribute ')
head3.setObjectName('main_screen_content')
link = QLabel("<a href='https://github.com/peeesspee/BitsOJ' style = 'color: #23B2EE'>Here</a>")
link.setObjectName('main_screen_content')
link.setToolTip(
'Opens github repository link in web browser.'
)
link.setTextInteractionFlags(Qt.TextBrowserInteraction)
link.setOpenExternalLinks(True)
link_widget = ui_widgets.get_horizontal_widget(head3, link)
sub_head1 = QLabel('Team BitsOJ')
sub_head1.setObjectName('about_screen_heading_2')
mentor_widget = ui_widgets.get_profile_widget(
'Mentor',
'@rast_7',
'Rajat Asthana',
'rast-7',
'rast7'
)
server_dev_widget = ui_widgets.get_profile_widget(
'Server Dev',
'@valiant1',
'Prakhar Pandey',
'valiant1011',
'valiant1011'
)
client_dev_widget = ui_widgets.get_profile_widget(
'Client/Setup Dev',
'@sachinam',
'Sachinam Srivastava',
'sachinam1397',
'sachinam1397'
)
judge_dev_widget = ui_widgets.get_profile_widget(
'Judge Dev',
'@ps',
'Prashant Singh',
'ps0798',
'ps0798'
)
cards_widget = QWidget()
cards_layout = QHBoxLayout(cards_widget)
cards_layout.addStretch(5)
cards_layout.addWidget(mentor_widget)
cards_layout.addStretch(2)
cards_layout.addWidget(server_dev_widget)
cards_layout.addStretch(2)
cards_layout.addWidget(client_dev_widget)
cards_layout.addStretch(2)
cards_layout.addWidget(judge_dev_widget)
cards_layout.addStretch(5)
cards_layout.setContentsMargins(0, 10, 0, 10)
main_layout = QVBoxLayout()
main_layout.addStretch(5)
main_layout.addWidget(sub_head1)
main_layout.addStretch(1)
main_layout.addWidget(cards_widget)
main_layout.addStretch(3)
main_layout.addWidget(head1)
main_layout.addWidget(head2)
main_layout.addWidget(link_widget)
main_layout.addStretch(2)
main_layout.setAlignment(head1, Qt.AlignCenter)
main_layout.setAlignment(head2, Qt.AlignCenter)
main_layout.setAlignment(link_widget, Qt.AlignCenter)
main_layout.setAlignment(sub_head1, Qt.AlignCenter)
main = QWidget()
main.setLayout(main_layout)
return main
def get_profile_widget(
title = 'None',
username = 'None',
name = 'None',
github_id = 'None',
linkedin_id = 'None'
):
# Shadow effect initialisation
shadow_effect = QGraphicsDropShadowEffect()
shadow_effect.setBlurRadius(15)
shadow_effect.setOffset(0)
shadow_effect.setColor(QColor(0, 0, 0, 255))
# Get cards for team members
top_layout = QVBoxLayout()
title_widget = QLabel(title)
title_widget.setObjectName('role_text')
banner_widget = QLabel(username)
banner_widget.setObjectName('banner_text')
banner_overlay_layout = QHBoxLayout()
banner_overlay_layout.addWidget(banner_widget)
banner_overlay_widget = QWidget()
banner_overlay_widget.setLayout(banner_overlay_layout)
banner_overlay_widget.setObjectName('banner_overlay')
# banner_widget.setGraphicsEffect(shadow_effect)
name_widget = QLabel(name)
name_widget.setObjectName('card_content')
github_link = "https://www.github.com/" + github_id
linkedin_link = "https://www.linkedin.com/in/" + linkedin_id
github_id_heading = QLabel('Github')
github_pixmap = QPixmap('./Elements/github.png')
github_id_heading.setPixmap(github_pixmap)
github_id_heading.setFixedSize(48, 48)
github_id_widget = QLabel(
"<a href='" + github_link + "' style = 'color: #23B2EE'>" + github_id + "</a>"
)
github_id_widget.setTextInteractionFlags(Qt.TextBrowserInteraction)
github_id_widget.setOpenExternalLinks(True)
github_id_widget.setObjectName('card_content')
github_hwidget = ui_widgets.get_horizontal_widget(github_id_heading, github_id_widget)
linkedin_id_heading = QLabel('LinkedIn')
linkedin_pixmap = QPixmap('./Elements/linkedin.png')
linkedin_id_heading.setPixmap(linkedin_pixmap)
linkedin_id_heading.setFixedSize(48, 48)
linkedin_id_widget = QLabel(
"<a href='" + linkedin_link + "' style = 'color: #23B2EE'>" + linkedin_id + "</a>"
)
linkedin_id_widget.setTextInteractionFlags(Qt.TextBrowserInteraction)
linkedin_id_widget.setOpenExternalLinks(True)
linkedin_id_widget.setObjectName('card_content')
linkedin_hwidget = ui_widgets.get_horizontal_widget(linkedin_id_heading, linkedin_id_widget)
top_layout.addWidget(title_widget)
top_layout.addWidget(banner_overlay_widget)
top_layout.addWidget(name_widget)
top_layout.addWidget(github_hwidget)
top_layout.addWidget(linkedin_hwidget)
top_layout.addStretch(1)
top_layout.setAlignment(title_widget, Qt.AlignCenter)
top_widget = QWidget()
top_widget.setLayout(top_layout)
top_widget.setFixedWidth(270)
top_widget.setFixedHeight(350)
top_widget.setObjectName('card')
top_widget.setGraphicsEffect(shadow_effect)
top_widget.setMinimumSize(270, 300)
return top_widget
def get_horizontal_widget(widget_1, widget_2):
layout = QHBoxLayout()
layout.addWidget(widget_1)
layout.addWidget(widget_2)
layout.addStretch(1)
widget = QWidget()
widget.setLayout(layout)
return widget
def submit_call(self, data_changed_flag,ui_widgets):
if data_changed_flag[0] == 0:
QMessageBox.warning(self, 'Message', 'Contest not yet started.\nPlease wait.')
elif data_changed_flag[0] == 4:
QMessageBox.warning(self, 'Message', 'Contest has been ENDED')
elif data_changed_flag[0] == 3:
QMessageBox.warning(self, 'Message', 'Your Time Up.\n Now you cannot submit solution')
else:
try:
config = handle_config.read_config_json()
local_time = time.localtime()
time_stamp = time.strftime("%H:%M:%S", local_time)
textbox_value = ui_widgets.text_area.toPlainText()
selected_language = str(ui_widgets.language_box.currentText())
problem_number = str(ui_widgets.problem_box.currentText())
print(problem_number)
problem_code = config["Problems"][str(ui_widgets.problem_box.currentText())]
problem_code = problem_code[1]
if(selected_language == 'C'):
extention = '.c'
language_code = 'GCC'
elif(selected_language == 'C++'):
extention = '.cpp'
language_code = 'CPP'
elif(selected_language == 'JAVA'):
extention = '.java'
language_code = 'JVA'
elif(selected_language == 'PYTHON-3'):
extention = '.py'
language_code = 'PY3'
else:
extention = '.py'
language_code = 'PY2'
local_id = manage_local_ids.get_new_id()
client_id = config["client_id"]
client_key = config["client_key"]
username = config["Username"]
print(local_id)
submission_management.insert_verdict(
local_id,
client_id,
0,
'Queued',
selected_language,
language_code,
problem_code,
problem_number,
time_stamp,
textbox_value,
extention
)
data_changed_flag[1] = 1
send_code.solution_request(
problem_code,
selected_language,
time_stamp,
textbox_value,
local_id,
client_key,
username,
config["IP"]
)
ui_widgets.text_area.setPlainText('')
except Exception as Error:
print(str(Error))
self.submission_counter = 0
return
# def show_problem(i,data_changed_flag,self):
# if data_changed_flag[0] == 0:
# QMessageBox.warning(self, 'Message', 'Contest not yet started.\nPlease wait.')
# else:
# webbrowser.open("Problems\\Problem_"+str(i)+'.pdf')
# return
# print('Button {0} clicked'.format(i))
def sending(self,data_changed_flag):
if data_changed_flag[0] == 0:
QMessageBox.warning(self, 'Message', 'Contest not yet started.\nPlease wait.')
elif data_changed_flag[0] == 4:
QMessageBox.warning(self, 'Message', 'Contest has been ENDED')
elif data_changed_flag[0] == 3:
QMessageBox.warning(self, 'Message', 'Your Time Up.\n Now you cannot submit any query')
else:
config = handle_config.read_config_json()
client_id = config["client_id"]
client_key = config["client_key"]
query = ui_widgets.ask_query.text()
if(query == ''):
QMessageBox.warning(self, 'Message', "Query Cannot be empty")
# print("Don't be stupid")
elif(len(query) > 499):
QMessageBox.warning(self, 'Message', "Length of query cannot exceed 500 words")
# print('Length of query cannot exceed 500 words')
else:
query_management.insert_query(query,'Waiting for response')
data_changed_flag[2] = 1
send_code.query_request(
client_id,
client_key,
query,
config["Username"],
config["IP"]
)
				QMessageBox.warning(self, 'Message', 'Your query has been successfully sent')
return
###################################################################################
class view_query_ui(QMainWindow):
query = ''
response = ''
def __init__(self,data_changed_flags, query, response,parent=None):
super(view_query_ui, self).__init__(parent)
self.data_changed_flags = data_changed_flags
view_query_ui.query = query
view_query_ui.response = response
self.setWindowTitle('View Query')
self.setFixedSize(600,550)
main = self.main_query_view_ui()
self.setCentralWidget(main)
# self.setStyleSheet(open('Elements/style.qss', "r").read())
# self.setWindowFlag(Qt.WindowCloseButtonHint, False)
return
def main_query_view_ui(self):
head = QLabel('View')
query_heading = QLabel('Query: ')
response_heading = QLabel('Response: ')
cursor = QTextCursor()
cursor.setPosition(0)
query = view_query_ui.query
query_text = QPlainTextEdit()
query_text.appendPlainText(view_query_ui.query)
query_text.setReadOnly(True)
query_text.setTextCursor(cursor)
# query_text.setObjectName('text_area_content')
response = view_query_ui.response
response_text = QPlainTextEdit()
response_text.appendPlainText(view_query_ui.response)
response_text.setReadOnly(True)
response_text.setTextCursor(cursor)
# response_text.setObjectName('text_area_content')
cancel_button = QPushButton('Close')
cancel_button.setFixedSize(150, 30)
cancel_button.clicked.connect(lambda:view_query_ui.cancel(self))
cancel_button.setDefault(True)
main_layout = QVBoxLayout()
main_layout.addWidget(head, alignment=Qt.AlignCenter)
main_layout.addWidget(query_heading)
main_layout.addWidget(query_text)
main_layout.addWidget(response_heading)
main_layout.addWidget(response_text)
main_layout.addWidget(cancel_button, alignment=Qt.AlignRight)
main = QWidget()
main.setLayout(main_layout)
head.setObjectName('view3')
query_heading.setObjectName('view')
response_heading.setObjectName('view')
query_text.setObjectName('text')
response_text.setObjectName('text')
cancel_button.setObjectName('submit')
main.setObjectName('query_submission_widget')
return main
def cancel(self):
self.close()
class view_submission_ui(QMainWindow):
source_file = ''
verdict = ''
language = ''
def __init__(self,data_changed_flags, source_file, verdict, language, run_id, parent=None):
super(view_submission_ui, self).__init__(parent)
self.data_changed_flags = data_changed_flags
view_submission_ui.source_file = source_file
view_submission_ui.verdict = verdict
view_submission_ui.language = language
self.setWindowTitle('Run ID : ' + str(run_id))
self.setFixedSize(900,800)
main = self.main_submission_view_ui()
self.setCentralWidget(main)
# self.setStyleSheet(open('Elements/style.qss', "r").read())
return
def main_submission_view_ui(self):
print(view_submission_ui.source_file)
with open("Solution/"+view_submission_ui.source_file, 'r') as solu:
data = solu.read()
cursor = QTextCursor()
cursor.setPosition(0)
submission_text = QPlainTextEdit()
submission_text.appendPlainText(data)
submission_text.setReadOnly(True)
submission_text.setTextCursor(cursor)
# submission_text.cursorForPosition(0)
# submission_text.QCursor.pos(0)
bottom_layout = QHBoxLayout()
verdict = QLabel("Judge's Verdict :")
verdict_layout = QLabel(view_submission_ui.verdict)
language = QLabel('Language : ')
language_layout = QLabel(view_submission_ui.language)
bottom_layout.addWidget(verdict)
bottom_layout.addWidget(verdict_layout)
bottom_layout.addWidget(language)
bottom_layout.addWidget(language_layout)
bottom_widget = QWidget()
bottom_widget.setLayout(bottom_layout)
main_layout = QVBoxLayout()
main_layout.addWidget(submission_text)
main_layout.addWidget(bottom_widget)
main = QWidget()
main.setLayout(main_layout)
submission_text.setObjectName('text')
verdict.setObjectName('view')
if view_submission_ui.verdict == 'AC':
verdict_layout.setObjectName('view1')
else:
verdict_layout.setObjectName('view2')
language.setObjectName('view')
language_layout.setObjectName('view3')
main.setObjectName('query_submission_widget')
return main
class view_problem_ui(QMainWindow):
def __init__(self, i, data_changed_flags, problem_file, parent=None):
super(view_problem_ui, self).__init__(parent)
self.data_changed_flags = data_changed_flags
self.setWindowTitle('Problem ' + str(i))
self.resize(900,830)
main = self.main_problem_view_ui(i, problem_file)
self.setCentralWidget(main)
return
def main_problem_view_ui(self, i, problem_file):
main_scroll = QScrollArea()
main_scroll.setObjectName('view_problem')
main_layout = QVBoxLayout()
main_layout.setObjectName('view_problem')
heading = QLabel(problem_file["Problem Name"])
heading.setObjectName('problem_heading')
problem_statement = QLabel(problem_file["Statement"])
problem_statement.setWordWrap(True)
problem_statement.setObjectName('problem_text')
problem_code_label = QLabel('Problem Code : ' + problem_file["Problem Code"])
problem_code_label.setObjectName('problem_heading_2')
input_label = QLabel('Input :')
input_label.setObjectName('problem_heading_2')
input_statement = QLabel(problem_file["Input"])
input_statement.setWordWrap(True)
input_statement.setObjectName('problem_text')
output_label = QLabel('Output :')
output_label.setObjectName('problem_heading_2')
output_statement = QLabel(problem_file["Output"])
output_statement.setWordWrap(True)
output_statement.setObjectName('problem_text')
constraints_label = QLabel('Constraints :')
constraints_label.setObjectName('problem_heading_2')
constraints_statement = QLabel(problem_file["Constraints"])
constraints_statement.setWordWrap(True)
constraints_statement.setObjectName('problem_text')
time_limit = QHBoxLayout()
time_limit_label = QLabel('Time Limit :')
time_limit_label.setObjectName('problem_heading_4')
time_limit_statement = QLabel(str(problem_file["Time Limit"]))
time_limit_statement.setWordWrap(True)
time_limit_statement.setObjectName('problem_heading_4')
time_limit.addWidget(time_limit_label)
time_limit.addWidget(time_limit_statement)
time_limit.addStretch(0)
time_limit.addSpacing(1)
time_limit.addWidget(problem_code_label, alignment = Qt.AlignRight)
time_limit_widget = QWidget()
time_limit_widget.setLayout(time_limit)
example_label = QLabel('Example : ')
example_label.setObjectName('problem_heading_2')
example_input_label = QLabel('Input :')
example_input_label.setObjectName('problem_heading_3')
example_input_statement = QLabel(problem_file["Example Input"])
example_input_statement.setWordWrap(True)
example_input_statement.setObjectName('problem_text_2')
example_output_label = QLabel('Output :')
example_output_label.setObjectName('problem_heading_3')
example_output_statement = QLabel(problem_file["Example Output"])
example_output_statement.setWordWrap(True)
example_output_statement.setObjectName('problem_text_2')
author_label = QLabel('Author : ')
author_label.setObjectName('problem_heading_2_author')
name_label = QLabel(problem_file["Author"])
name_label.setObjectName('problem_heading_2_white')
hwidget = QWidget()
hlayout = QHBoxLayout(hwidget)
hlayout.addWidget(author_label)
hlayout.addWidget(name_label)
hlayout.addStretch(1)
main_layout.addWidget(heading, alignment = Qt.AlignCenter)
main_layout.addWidget(time_limit_widget)
main_layout.addWidget(problem_statement)
main_layout.addWidget(input_label)
main_layout.addWidget(input_statement)
main_layout.addWidget(output_label)
main_layout.addWidget(output_statement)
main_layout.addWidget(constraints_label)
main_layout.addWidget(constraints_statement)
main_layout.addWidget(example_label)
main_layout.addWidget(example_input_label)
main_layout.addWidget(example_input_statement)
main_layout.addWidget(example_output_label)
main_layout.addWidget(example_output_statement)
main_layout.addWidget(hwidget)
main_layout.addStretch(0)
main_layout.addSpacing(1)
main = QWidget()
main.setLayout(main_layout)
main_scroll.setWidget(main)
main_scroll.setWidgetResizable(True)
main_scroll.setFixedWidth(880)
layout = QVBoxLayout()
layout.addWidget(main_scroll)
main_widget = QWidget()
main_widget.setLayout(layout)
main_widget.setStyleSheet(
'''QWidget{ background : #171B1F;}'''
)
return main_widget
|
the-stack_0_8327 | import numpy
EARTH_R = 6.371E6
EARTH_MU = 3.986004418E14 # G * M_EARTH
SQRT_EARTH_MU = numpy.sqrt(EARTH_MU)
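# Precomputed once; the universal-variable propagator in Body.calc_state_vectors uses sqrt(mu) repeatedly.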
class Body(object):
POSITION_VISUALISATIONS = {'symbol': 0, 'rv': 1, 'dot': 2}
ORBIT_VISUALISATIONS = {'all': 0, 'orbit': 1, 'none': 2}
def __init__(self, r, v, t0, orbit_color=(1.0, 1.0, 0.0, 1.0), stipple=None, record_trajectory=False):
if isinstance(r, tuple):
r = numpy.array(r)
if isinstance(v, tuple):
v = numpy.array(v)
self.r = r
self.v = v
self.t0 = t0
self.orbit_color = orbit_color
self.stipple = stipple
self.record_trajectory = record_trajectory
self.trajectory = []
self.collided_time = None # If a collision with the planet happens at any point in time, this will be set
self.r0 = r
self.r0_ = numpy.linalg.norm(self.r0)
self.v0 = v
self.calc_orbital_params()
self.pos_viz_mode = Body.POSITION_VISUALISATIONS['symbol']
self.orbit_viz_mode = Body.ORBIT_VISUALISATIONS['all']
# For drawing partial orbits, these would change to the angles required
self.orbit_start, self.orbit_end = 0.0, 360.0
def clone(self):
new_body = Body(self.r, self.v, self.t0, self.orbit_color, self.stipple, self.record_trajectory)
new_body.pos_viz_mode = self.pos_viz_mode
self.pos_viz_mode = Body.POSITION_VISUALISATIONS['dot']
self.orbit_viz_mode = Body.ORBIT_VISUALISATIONS['orbit']
return new_body
def calc_orbital_params(self):
#Calculates Keplerian orbital parameters based on the state vectors
#This method should be called after each change to velocity vector
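#Elements set here: a (semi-major axis), e (eccentricity), i (inclination, deg),
#o (longitude of the ascending node, deg), w (argument of periapsis, deg),
#ni (true anomaly, deg), T (orbital period, s), rp / ra (periapsis / apoapsis distance)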
#a = - mu / (v^2 - 2 * mu/r)
v_2 = numpy.vdot(self.v, self.v)
r_ = numpy.linalg.norm(self.r)
# TODO: this won't work for parabolic trajectories (as the denominator is 0)!
self.a = -EARTH_MU / (v_2 - 2 * EARTH_MU / r_)
#T = 2*pi*sqrt(a^3/mu)
# TODO: again, orbital period is not defined for non-bound orbits
self.T = 2.0 * numpy.pi * numpy.sqrt(self.a**3 / EARTH_MU)
#Calculate specific relative angular momentum h = r X v
h = numpy.cross(self.r, self.v)
h_2 = numpy.vdot(h, h)
h_ = numpy.sqrt(h_2)
#Calculate eccentricity vector e = (v X h) / EARTH_MU - r/|r|
e = numpy.cross(self.v, h) / EARTH_MU - self.r/r_
self.e = numpy.linalg.norm(e)
i_rad = numpy.arccos(h[2] / h_) #h[2] = hz
self.i = numpy.degrees(i_rad)
#However, some sources state that if hz < 0 then inclination is 180 deg - i; should check this
#n is the vector pointing to the ascending node
n = numpy.array((-h[1], h[0], 0))
n_ = numpy.linalg.norm(n)
if i_rad == 0.0:
o_rad = 0.0
else:
if n[1] >= 0.0: #ie. if h[0] >= 0
o_rad = numpy.arccos(n[0] / n_)
else:
o_rad = 2 * numpy.pi - numpy.arccos(n[0] / n_)
self.o = numpy.degrees(o_rad)
#Calculate ni (true anomaly)
q = numpy.vdot(self.r, self.v) #q = r . v (r times the radial velocity); used below to recover e*cos(ni) and e*sin(ni)
ni_x = h_2 / (r_ * EARTH_MU) - 1.0
ni_y = h_ * q / (r_ * EARTH_MU)
self.ni = numpy.degrees(numpy.arctan2(ni_y, ni_x))
if self.e == 0.0:
#For circular orbit w is 0 by convention
self.w = 0.0
else:
if n_ == 0.0:
#For equatorial orbit
self.w = numpy.degrees(numpy.arctan2(e[1], e[0]))
else:
self.w = numpy.degrees(numpy.arccos(numpy.vdot(n, e) / (n_ * self.e)))
if e[2] < 0.0:
self.w = 360.0 - self.w
if self.w < 0.0:
self.w = 360.0 + self.w
self.rp = self.a * (1.0 - self.e) #Periapsis distance
self.ra = self.a * (1.0 + self.e) #Apoapsis distance
def apply_dv(self, dv, t):
self.v += dv
self.v0 = self.v
self.r0 = self.r
self.r0_ = numpy.linalg.norm(self.r0)
self.t0 = t
self.calc_orbital_params()
def calc_state_vectors(self, t):
# Based on Keplerian orbital parameters calculates state vectors at time t0 + t
# Algorithm from "Fundamentals of astrodynamics" by Roger R. Bate, Donald D. Mueller and Jerry E. White
def evaluate_t_dt(x):
# Evaluates t(x) and dt(x)/dx and returns them as a tuple.
# We use these to find x via Newton's numerical approximation method.
# Both values are evaluated in one function (as opposed to evaluate_t and evaluate_dt)
# to avoid calculating z, sqrt(z), C, S twice.
z = x**2 / self.a
sqrt_z = numpy.sqrt(z)
C = (1.0 - numpy.cos(sqrt_z)) / z
S = (sqrt_z - numpy.sin(sqrt_z)) / numpy.sqrt(z**3)
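# C and S above are the Stumpff functions c2(z) and c3(z); these closed forms assume z > 0, i.e. an elliptical orbit.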
t = (numpy.vdot(self.r0, self.v0) / SQRT_EARTH_MU * x**2 * C + (1.0 - self.r0_ / self.a) * x**3 * S + self.r0_ * x) / SQRT_EARTH_MU
dt = (x**2 * C + numpy.vdot(self.r0, self.v0) / SQRT_EARTH_MU * x * (1.0 - z * S) + self.r0_ * (1.0 - z * C)) / SQRT_EARTH_MU
return (t, dt)
# Don't move object once it has collided with the surface
if self.collided_time and t > self.collided_time:
return
# First we find x using Newton's method. It converges remarkably quickly.
# For elliptical orbits (including circular), we use sqrt(mu)*(t-t0)/a as the first approximation
# NOTE: Parabolic and hyperbolic orbits are not supported at the moment!
# We simplify by setting t0 to be 0 and solving for delta_t, instead of solving for t with some non-zero t0
# (which is more complicated)
delta_t = t - self.t0
x_guess = SQRT_EARTH_MU * delta_t / self.a #Initial guess
t_guess, slope = evaluate_t_dt(x_guess)
while abs(delta_t - t_guess) > 1.0E-10:
x_guess = x_guess + (delta_t - t_guess) / slope
t_guess, slope = evaluate_t_dt(x_guess)
x = x_guess
#TODO: rewrite above into a for loop with a break so that the loop is guaranteed to exit as opposed to now
# x is now the value we've been looking for
# Next, we calculate f, g, f_dot and g_dot and from these r and v
z = x**2 / self.a
sqrt_z = numpy.sqrt(z)
C = (1.0 - numpy.cos(sqrt_z)) / z
S = (sqrt_z - numpy.sin(sqrt_z)) / numpy.sqrt(z**3)
f = 1.0 - (x**2 / self.r0_) * C
g = delta_t - x**3 / SQRT_EARTH_MU * S
self.r = f * self.r0 + g * self.v0
r_ = numpy.linalg.norm(self.r)
g_dot = 1.0 - x**2 / r_ * C
f_dot = SQRT_EARTH_MU / (self.r0_ * r_) * x * (z * S - 1.0)
self.v = f_dot * self.r0 + g_dot * self.v0
if self.record_trajectory:
self.trajectory.append(self.r)
if numpy.linalg.norm(self.r) < EARTH_R:
self.v = numpy.array((0.0, 0.0, 0.0))
self.collided_time = t
def prograde(self):
"""Returns a unit vector in the prograde direction, ie. normalize(self.v)."""
return self.v / numpy.linalg.norm(self.v)
def retrograde(self):
"""Returns a unit vector in the retrograde direction, ie. normalize(-self.v)."""
return -self.prograde()
def orbit_normal(self):
"""Returns a unit vector in the orbit normal direction, ie. normalize(self.r x self.v)."""
rxv = numpy.cross(self.r, self.v)
return rxv / numpy.linalg.norm(rxv)
def orbit_antinormal(self):
return -self.orbit_normal()
@staticmethod
def generate_circular_equatorial_orbit(alt, orbit_color=(1.0, 1.0, 0.0, 1.0)):
# Generates a circular equatorial orbit at the given altitude.
# For circular orbits, v = sqrt(gamma/r)
r = (EARTH_R + alt, 0.0, 0.0)
r_ = numpy.linalg.norm(r)
v = (0.0, numpy.sqrt(EARTH_MU / r_), 0.0)
body = Body(r, v, 0.0, orbit_color)
return Body(body.r, body.v, 0.0, orbit_color)
@staticmethod
def generate_random_orbit():
import random
rho = random.uniform(EARTH_R + 200.0, 2 * EARTH_R + 200)
azimuth = random.uniform(0.0, 2.0 * numpy.pi)
# We don't want orbits with more than 45 degrees inclination
elevation = random.uniform(-numpy.pi / 4.0, numpy.pi / 4.0)
x = rho * numpy.cos(elevation) * numpy.cos(azimuth)
y = rho * numpy.cos(elevation) * numpy.sin(azimuth)
z = rho * numpy.sin(elevation)
r = numpy.array((x, y, z))
z_axis = numpy.array((0.0, 0.0, -rho))
v_unit = numpy.cross(r, z_axis)
v_unit /= numpy.linalg.norm(v_unit)
circular_velocity = numpy.sqrt(EARTH_MU / rho)
velocity = random.uniform(1.0, 1.2) * circular_velocity
v = v_unit * velocity
def random_color():
return 0.50 + random.randint(0, 2) * 0.25
color = (random_color(), random_color(), random_color(), 1.0)
stipple = random.choice((None, 0b0101010101010101, 0b0110011001100110))
body = Body(r, v, 0.0, color, stipple)
body.calc_state_vectors(random.uniform(-3600.0, 3600.0))
return Body(body.r, body.v, 0.0, color, stipple)
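# --- Usage sketch (not part of the original module) ---
# A minimal example of how the Body class above could be driven: generate a
# random bound orbit and sample its state vectors once per minute for an hour.
# Starting at t = 60 s sidesteps the delta_t = 0 corner case of the
# universal-variable solver; the print format is purely illustrative.
if __name__ == '__main__':
    body = Body.generate_random_orbit()
    for t in range(60, 3601, 60):
        body.calc_state_vectors(float(t))
        print('t=%5d s  |r|=%.0f m  |v|=%.1f m/s'
              % (t, numpy.linalg.norm(body.r), numpy.linalg.norm(body.v)))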
|
the-stack_0_8330 | import os
import time
import getopt
import socket
import sys
from .snakeoil3_gym import ServerState,DriverAction
_practice_path = '/home/averma/torcs/torcs-1.3.7/src/raceman/practice.xml'
# Initialize help messages
ophelp= 'Options:\n'
ophelp+= ' --host, -H <host> TORCS server host. [localhost]\n'
ophelp+= ' --port, -p <port> TORCS port. [3001]\n'
ophelp+= ' --id, -i <id> ID for server. [SCR]\n'
ophelp+= ' --steps, -m <#> Maximum simulation steps. 1 sec ~ 50 steps. [100000]\n'
ophelp+= ' --episodes, -e <#> Maximum learning episodes. [1]\n'
ophelp+= ' --track, -t <track> Your name for this track. Used for learning. [unknown]\n'
ophelp+= ' --stage, -s <#> 0=warm up, 1=qualifying, 2=race, 3=unknown. [3]\n'
ophelp+= ' --debug, -d Output full telemetry.\n'
ophelp+= ' --help, -h Show this help.\n'
ophelp+= ' --version, -v Show current version.'
usage= 'Usage: %s [ophelp [optargs]] \n' % sys.argv[0]
usage= usage + ophelp
version= "20130505-2"
class Client(object):
def __init__(self,H=None,p=None,i=None,e=None,t=None,s=None,d=None,vision=False, track='practice.xml'):
#################################### modified by redwan
self.__gui = vision
self.__timeout = 10000
self.__data_size = 2 ** 17
self.__server = Server(vision,track)
self.__socket = self.__create_socket()
# If you don't like the option defaults, change them here.
self.vision = vision
self.host= 'localhost'
self.port= 3001
self.sid= 'SCR'
self.maxEpisodes=1 # "Maximum number of learning episodes to perform"
self.trackname= 'unknown'
self.stage= 3 # 0=Warm-up, 1=Qualifying 2=Race, 3=unknown <Default=3>
self.debug= False
self.maxSteps= 100000 # 50steps/second
if H: self.host= H
if p: self.port= p
if i: self.sid= i
if e: self.maxEpisodes= e
if t: self.trackname= t
if s: self.stage= s
if d: self.debug= d
self.__quickrace_xml_path = os.path.expanduser('~') + '/.torcs/config/raceman/{}'.format(track)
self.S= ServerState()
self.R= DriverAction()
self.__connect_to_server()
# self.parse_the_command_line()
def __init_server(self):
os.system('pkill torcs')
time.sleep(0.001)
if self.__gui:
# if self.__cmd_exists('optirun'):
# os.system('optirun torcs -nofuel -nolaptime -s -t {} >/dev/null &'.format(self.__timeout))
# else:
os.system('torcs -nofuel -nolaptime -s -t {} >/dev/null &'.format(self.__timeout))
time.sleep(2)
os.system('sh utilities/autostart.sh')
else:
os.system('torcs -nofuel -nolaptime -t 50000 -r ' + self.__quickrace_xml_path + ' >/dev/null &')
# print('Server created!')
time.sleep(0.001)
def parse_the_command_line(self):
try:
(opts, args) = getopt.getopt(sys.argv[1:], 'H:p:i:m:e:t:s:dhv',
['host=', 'port=', 'id=', 'steps=',
'episodes=', 'track=', 'stage=',
'debug', 'help', 'version'])
except getopt.error as why:
print('getopt error: %s\n%s' % (why, usage))
sys.exit(-1)
try:
for opt in opts:
if opt[0] == '-h' or opt[0] == '--help':
print(usage)
sys.exit(0)
if opt[0] == '-d' or opt[0] == '--debug':
self.debug = True
if opt[0] == '-H' or opt[0] == '--host':
self.host = opt[1]
if opt[0] == '-i' or opt[0] == '--id':
self.sid = opt[1]
if opt[0] == '-t' or opt[0] == '--track':
self.trackname = opt[1]
if opt[0] == '-s' or opt[0] == '--stage':
self.stage = int(opt[1])
if opt[0] == '-p' or opt[0] == '--port':
self.port = int(opt[1])
if opt[0] == '-e' or opt[0] == '--episodes':
self.maxEpisodes = int(opt[1])
if opt[0] == '-m' or opt[0] == '--steps':
self.maxSteps = int(opt[1])
if opt[0] == '-v' or opt[0] == '--version':
print('%s %s' % (sys.argv[0], version))
sys.exit(0)
except ValueError as why:
print('Bad parameter \'%s\' for option %s: %s\n%s' % (
opt[1], opt[0], why, usage))
sys.exit(-1)
if len(args) > 0:
print('Superflous input? %s\n%s' % (', '.join(args), usage))
sys.exit(-1)
def __connect_to_server(self):
tries = 3
while True:
sensor_angles = "-45 -19 -12 -7 -4 -2.5 -1.7 -1 -.5 0 .5 1 1.7 2.5 4 7 12 19 45"
initmsg = '%s(init %s)' % (self.sid, sensor_angles)
try:
self.__socket.sendto(initmsg.encode(), (self.host, self.port))
except socket.error:
sys.exit(-1)
sockdata = str()
try:
sockdata, address = self.__socket.recvfrom(self.__data_size)
sockdata = sockdata.decode('utf-8')
except socket.error:
# print("Waiting for __server on __port " + str(self.__port))
tries -= 1
if tries == 0:
# print("Server didn't answer, sending restart signal")
self.__server.restart()
identify = '***identified***'
if identify in sockdata:
# print("Client connected on __port " + str(self.__port))
break
def __send_message(self, message):
try:
self.__socket.sendto(message.encode(), (self.host, self.port))
except socket.error as emsg:
print(u"Error sending to __server: %s Message %s" % (emsg[1], str(emsg[0])))
sys.exit(-1)
def get_servers_input(self):
'''Server's input is stored in a ServerState object'''
if not self.__socket: return
sockdata = str()
while True:
try:
# Receive server data
sockdata, addr = self.__socket.recvfrom(self.__data_size)
sockdata = sockdata.decode('utf-8')
except socket.error as emsg:
print('.', end=' ')
# print "Waiting for data on %d.............." % self.port
if '***identified***' in sockdata:
print("Client connected on %d.............." % self.port)
continue
elif '***shutdown***' in sockdata:
print((("Server has stopped the race on %d. " +
"You were in %d place.") %
(self.port, self.S.d['racePos'])))
self.shutdown()
return
elif '***restart***' in sockdata:
# What do I do here?
print("Server has restarted the race on %d." % self.port)
# I haven't actually caught the server doing this.
self.shutdown()
return
elif not sockdata: # Empty?
continue # Try again.
else:
self.S.parse_server_str(sockdata)
if self.debug:
sys.stderr.write("\x1b[2J\x1b[H") # Clear for steady output.
print(self.S)
break # Can now return from this function.
if sockdata:
return self.__parse_server_string(sockdata)
# while True:
# try:
# sockdata, address = self.__socket.recvfrom(self.__data_size)
# sockdata = sockdata.decode('utf-8')
# except socket.error:
# print('', end='')
# if sockdata:
# return self.__parse_server_string(sockdata)
def __parse_server_string(self, server_string):
track_data = {}
server_string = server_string.strip()[:-1]
server_string_list = server_string.strip().lstrip('(').rstrip(')').split(')(')
for i in server_string_list:
w = i.split(' ')
track_data[w[0]] = self.__destringify(w[1:])
return track_data
def shutdown(self):
if not self.__socket: return
print(("Race terminated or %d steps elapsed. Shutting down %d."
% (self.maxSteps,self.port)))
self.__socket.close()
self.R.d['meta'] = 1
self.__socket = None
def respond_to_server(self):
if not self.__socket: return
try:
message = repr(self.R)
self.__socket.sendto(message.encode(), (self.host, self.port))
except socket.error as emsg:
print("Error sending to server: %s Message %s" % (emsg[1], str(emsg[0])))
sys.exit(-1)
if self.debug: print(self.R.fancyout())
def __destringify(self, string):
if not string:
return string
if type(string) is str:
try:
return float(string)
except ValueError:
print("Could not find a value in %s" % string)
return string
elif type(string) is list:
if len(string) < 2:
return self.__destringify(string[0])
else:
return [self.__destringify(i) for i in string]
@property
def restart(self):
# print('Restarting __server...')
self.__socket = self.__create_socket()
return self.__connect_to_server()
@staticmethod
def __create_socket():
try:
so = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error:
print('Error: Could not create __socket...')
sys.exit(-1)
so.settimeout(1)
return so
class Server:
def __init__(self, gui, track, timeout=10000):
self.__gui = gui
self.__quickrace_xml_path = os.path.expanduser('~') + '/.torcs/config/raceman/{}'.format(track)
# self.__create_race_xml(track, track_type)
self.__timeout = timeout
self.__init_server()
# @staticmethod
# def __cmd_exists(cmd):
# return subprocess.call("type " + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
def __init_server(self):
os.system('pkill torcs')
time.sleep(0.001)
if self.__gui:
# if self.__cmd_exists('optirun'):
# os.system('optirun torcs -nofuel -nolaptime -s -t {} >/dev/null &'.format(self.__timeout))
# else:
os.system('torcs -nofuel -nolaptime -s -t {} >/dev/null &'.format(self.__timeout))
time.sleep(2)
os.system('sh utilities/autostart.sh')
else:
os.system('torcs -nofuel -nolaptime -t 50000 -r ' + self.__quickrace_xml_path + ' >/dev/null &')
# print('Server created!')
time.sleep(0.001)
def restart(self):
# print('Restarting __server...')
self.__init_server()
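# --- Usage sketch (not part of the original module) ---
# Illustrative only: assumes a local TORCS installation and the race config
# referenced above. The sensor/action keys ('angle', 'steer', 'accel') follow
# the snakeoil3 ServerState / DriverAction convention; the steering rule below
# is a placeholder, not a tuned controller.
if __name__ == '__main__':
    client = Client(p=3001, vision=False)
    for _ in range(100):
        client.get_servers_input()                      # fills client.S with sensor data
        client.R.d['steer'] = client.S.d['angle'] * 10 / 3.1416
        client.R.d['accel'] = 0.2
        client.respond_to_server()
    client.shutdown()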
|
the-stack_0_8331 | import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
)
from torch.testing._internal.common_utils import IS_MACOS, load_tests, sandcastle_skip_if
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.suppress() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.suppress()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to be run as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::clamp_min",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
FIFTY_MIL_CYCLES = 50000000
_rpc_barrier_count = 0
def _increment_count():
global _rpc_barrier_count
_rpc_barrier_count += 1
def _reset_count():
global _rpc_barrier_count
_rpc_barrier_count = 0
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
rpc.WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# The classes and functions below are used to test python user-defined
# functions, classes and methods over RPC.
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
def __init__(self):
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a, delay=False):
self.a = a
# delay initialization to simulate errors if specified
if delay:
time.sleep(2)
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def my_slow_method(self, my_tensor_arg):
time.sleep(5)
return torch.add(self.a, my_tensor_arg)
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_sparse_tensor():
i = [[0, 1, 1], [2, 0, 2]]
v = [3, 4, 5]
return torch.sparse_coo_tensor(i, v, (2, 3))
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!')
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!')
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!')
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for k, v in dict_input.items():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this method returns immediately without blocking the callee, but will
# generate additional requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
def raise_func():
raise ValueError(expected_err)
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
device = t.device
original_stream = torch.cuda.current_stream(device)
new_stream = torch.cuda.Stream(device)
new_stream.wait_stream(original_stream)
with torch.cuda.stream(new_stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
t.fill_(1)
fut = Future(devices=[device])
fut.set_result(t)
return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
def cb(fut):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
return fut.value() + z
return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
__slots__ = ("tensor", "lock", "event")
def __init__(self, t):
self.tensor = t
# Add one non-picklable field, to ensure it's ignored/skipped.
self.lock = Lock()
self.event = torch.cuda.Event(enable_timing=True)
def increase(self, v):
with self.lock:
self.tensor += v
def sum(self):
with self.lock:
self.event.record()
return self.tensor.sum()
# Copied from test/test_cuda.py.
_cycles_per_ms = None
def get_cycles_per_ms():
"""Approximate number of cycles per millisecond for torch.cuda._sleep"""
global _cycles_per_ms
if _cycles_per_ms is None:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
_cycles_per_ms = 1000000 / start.elapsed_time(end)
return _cycles_per_ms
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class RpcTest(RpcAgentTestFixture):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1 + 3)
def _test_self_remote_rref_as_rpc_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, torch.ones(2, 2) + 1))
self.assertEqual(ret, torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2) + 1)
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2))
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_rpc_arg(dst)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._test_self_remote_rref_as_rpc_arg(rpc.get_worker_info())
def _test_self_remote_rref_as_remote_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
self.assertEqual(
ret_rref.to_here(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2)
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_remote_arg(dst)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._test_self_remote_rref_as_remote_arg(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name_2(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
rpc.init_rpc(
name=worker_name(self.rank % (self.world_size - 1)),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
def test_world_size_one(self):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
expect = torch.ones(2, 2) * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(expect, result)
expect = torch.ones(3, 3) * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(torch.ones(3, 3), torch.ones(3, 3))
).wait()
self.assertEqual(expect, result)
expect = torch.ones(4, 4) * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(torch.ones(4, 4), torch.ones(4, 4))
).to_here()
self.assertEqual(expect, result)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_invalid_names(self):
from torch.distributed.rpc import WorkerInfo
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@staticmethod
def return_callee_id():
return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
worker_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_future_wait_twice(self):
dst = worker_name((self.rank + 1) % self.world_size)
futs = []
for i in range(20):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
torch.futures.wait_all(futs)
for fut in futs:
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def _run_uneven_workload(self, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
def test_wait_all_workers(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 calls this immediately and has some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def test_wait_all_workers_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 calls this immediately and has some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
def _test_barrier_helper(self, info, names, multi_threaded=False):
names = sorted(names)
leader = names[0]
rpc.rpc_sync(leader, _reset_count)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, 0)
rpc.api._barrier(names)
rpc.rpc_sync(leader, _increment_count)
rpc.api._barrier(names)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
# Test rpc barrier when some processes are not involved in the barrier
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [f"worker{info.id}"]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
# This test validates the implementation of barrier when multiple threads call into it
# We only need to check that it does not hang in this case
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload()
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with _profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = [
event for event in remote_events if "aten::add" in event.name
][0]
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with _profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = [
event for event in local_function_events if "aten::add" in event.name
][0]
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# If cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicating that no memory was profiled).
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with _profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
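# Standalone sketch of the trace inspection done above: the exported trace is
# treated as a JSON list of event dicts, each with a "name" field, so checking
# for an expected event reduces to a substring scan. The path and event name
# arguments are illustrative placeholders.
import json
def _trace_contains_event(trace_path, expected_name):
    with open(trace_path) as f:
        trace = json.load(f)
    return any(expected_name in event["name"] for event in trace)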
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
# prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
# Note: we don't assert that every remote event must be in the above
# set; the set is just a representative sample of what we expect to
# see. The profiler can change and add more events, but we should
# always expect to see this representative set.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
# The set should be empty, otherwise its contained elements did
# not show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
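# Minimal sketch of the fan-out pattern used above: submit the same callable a
# few times to a thread pool and block until every submission has finished,
# re-raising any exception from a worker thread. The helper name and worker
# count are illustrative only.
import concurrent.futures
def _run_in_parallel(fn, num_workers=2):
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
        futs = [executor.submit(fn) for _ in range(num_workers)]
    # Leaving the `with` block joins the pool; .result() surfaces worker exceptions.
    return [fut.result() for fut in futs]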
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
ret = fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
# Validate order remote events show up in profiling output.
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(
set(remote_events_list),
set(EXPECTED_REMOTE_EVENTS),
f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with _profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
out = ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
cpu_children = list(event.cpu_children)  # copy so we don't mutate or iterate the list we extend
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with _profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = [
evt for evt in local_function_events if "##forward##" in evt.name
][0]
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
def validate_profiling_workload(self, dst, prof):
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context() as context_id:
with _profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
# Ensure that flipping the order of the context managers still results
# in events being recorded as expected.
with _profile() as prof:
with dist_autograd.context() as context_id:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
if self.rank == 1:
with _profile() as prof:
record_function_ctx_mgr = (
contextlib.suppress()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
with record_function_ctx_mgr as rf:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
# Since the RPC call is within the scope, its CPU interval should be
# contained within foo's interval.
self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
# The sender, destination worker, function run, and type of RPC should
# all be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
# verify order by ensuring that the outer context comes
# before the rpc event.
foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name)
rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
self.assertLess(foo_event_ix, rpc_event_idx)
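# Standalone sketch of the optional context manager pattern used above: when no
# scoping is requested, substitute a no-op context manager so the `with`
# statement stays unconditional. contextlib.nullcontext is the stdlib no-op
# (the helper above reuses contextlib.suppress() for the same purpose); the
# function name and cm_factory parameter are illustrative.
import contextlib
def _maybe_scope(active, cm_factory):
    return cm_factory() if active else contextlib.nullcontext()
# e.g. `with _maybe_scope(use_record_function, lambda: record_function("foo")):`
# runs the body either inside the record_function scope or bare.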
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
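# Standalone sketch of the "top-level event" selection above: given per-thread
# events ordered by start time, keep only those that start after the previously
# kept event has ended, i.e. events not nested inside another event. The
# (name, start, end) tuples are an illustrative stand-in for profiler events.
def _top_level_names(events):
    names = []
    last_end = 0
    for name, start, end in events:
        if start > last_end:
            names.append(name)
            last_end = end
    return names
# e.g. _top_level_names([("outer", 1, 10), ("inner", 2, 5), ("next", 11, 12)])
# -> ["outer", "next"]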
@dist_init
def test_server_process_global_profiler(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
expected_inner_events = ['aten::sub']
expected_outer_events = expected_inner_events + ['aten::add']
self._assert_top_level_events(inner_events, expected_inner_events)
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, expected_outer_events)
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
# Validate that calling the function twice results in an error.
with _profile() as pf:
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with _profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
# This barrier prevents a race condition where the main thread exits
# the context manager before the remote function has run.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
nested_rpc,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(rref.to_here(), torch.ones(n, n) * 2)
@dist_init
def test_builtin_remote_self(self):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(torch.ones(2, 2), torch.ones(2, 2)),
)
self.assertEqual(rref.local_value(), torch.ones(2, 2) * 2)
def _test_multi_remote_call(self, fn, args_fn=lambda x: (), kwargs_fn=lambda x: {}):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n),
kwargs=kwargs_fn(n),
)
)
expected.append(fn(*args_fn(n), **kwargs_fn(n)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
@dist_init
def test_multi_builtin_remote_ret(self):
def args_fn(n):
return (torch.ones(n, n), torch.ones(n, n))
self._test_multi_remote_call(torch.add, args_fn=args_fn)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@dist_init
def test_multi_py_udf_remote(self):
def kwargs_fn(n):
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
self._test_multi_remote_call(my_function, kwargs_fn=kwargs_fn)
@dist_init
def test_py_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rref_args_user_share(self):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rpc_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, torch.ones(n, n) + 4)
@dist_init
def test_nested_remote(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
nested_remote,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 3)
@dist_init
def test_nested_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_nested_rref_stress(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_multi_layer_nested_async_rpc(self):
# This test will exit right away, but there will be a chain of async
# RPCs. The termination algorithm should detect those messages properly.
# Otherwise, some peer could exit early, leaving others to hit timeout
# or connection-closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
# Ensure that an error is raised if a user tries to call
# local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
@dist_init
def _test_rref_type(self, blocking):
def launched_rpc(events):
expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
return any([e.name.startswith(expected_name) for e in events])
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with _profile() as p:
t = rref._get_type(blocking=blocking)
if not blocking:
t = t.wait()
self.assertTrue(launched_rpc(p.function_events))
expected_type = type(torch.ones(2))
self.assertEqual(t, expected_type)
futs = []
def verify(fut):
self.assertEqual(fut.value(), expected_type)
with _profile() as p:
for _ in range(10):
t = rref._get_type(blocking=blocking)
if not blocking:
futs.append(t)
t.add_done_callback(verify)
t = t.wait()
self.assertEqual(t, expected_type)
if not blocking:
# Note that cached calls with blocking=False all return the same
# cached original future.
first_fut = futs[0]
for f in futs[1:]:
self.assertTrue(f is first_fut)
# Ensure we never launch another RPC, other than for the very
# first call.
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_blocking(self):
self._test_rref_type(blocking=True)
def test_rref_type_non_blocking(self):
self._test_rref_type(blocking=False)
@dist_init
def _test_rref_type_with_error(self, blocking):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, raise_func)
# Blocking: error raised inline
if blocking:
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type(blocking=blocking)
else:
# Non-blocking: Immediately return future, block on wait
fut = rref._get_type(blocking=blocking)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def test_rref_type_with_error_blocking(self):
self._test_rref_type_with_error(blocking=True)
def test_rref_type_with_error_non_blocking(self):
self._test_rref_type_with_error(blocking=False)
@dist_init
def _test_rref_type_owner(self, blocking):
rref = RRef(torch.ones(2) + 1)
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, type(torch.ones(2)))
rref = RRef(MyClass(0))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_owner_blocking(self):
self._test_rref_type_owner(blocking=True)
def test_rref_type_owner_non_blocking(self):
self._test_rref_type_owner(blocking=False)
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
x = dict()
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
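# Standalone sketch of the dict-key behaviour exercised above: two objects that
# compare equal and hash equal collapse to a single dictionary entry, which is
# the property relied on when using owner() results as keys. _WorkerStub is an
# illustrative stand-in, not torch's WorkerInfo.
from dataclasses import dataclass
@dataclass(frozen=True)  # frozen dataclasses provide __eq__ and __hash__
class _WorkerStub:
    id: int
    name: str
def _owner_as_key_sketch():
    d = {}
    d[_WorkerStub(0, "worker0")] = "a"
    d[_WorkerStub(0, "worker0")] = "b"  # equal + same hash -> same key, overwritten
    assert len(d) == 1 and d[_WorkerStub(0, "worker0")] == "b"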
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
Tests that it is possible to call an instance method on a remote object
by using rref.owner() as the destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
"OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
"UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
id_class, self.rank
),
)
@dist_init
def test_rref_get_future(self):
# Tests that we can obtain the future corresponding to the creation of
# the RRef on the remote end.
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
# This test checks local states that are modified by remote workers.
# This means that we need a barrier before and after every check.
# The barrier before a check makes sure that all previous states are
# cleared globally; the barrier after ensures that no subsequent state
# change leaks into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
# RRef on local value is not added to context until shared across RPC
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Wait for owner rref to be cleared.
while int(info["num_owner_rrefs"]) != 0:
info = _rref_context_get_debug_info()
time.sleep(0.1)
dist.barrier()
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
# Test that rpc.enable_gil_profiling(False) results in GIL wait time
# not being recorded. GIL profiling should be disabled by default.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
# Test that we can start RPC and then immediately shut down locally
# without sending any messages.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
# Only test keys in this test case; values should be covered by
# individual module debug info tests.
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in Python 3.6+, so we manually
# check that the key sets are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
# This barrier is needed to ensure that some workers do not exit before
# others have been brought up.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
# Shutdown sequence is not very well defined and as a result
# we can see any of the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
# Otherwise, some RPCs can time out since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
# Futures should time out and be marked with an exception indicating the timeout.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that if a new timeout is set, old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
# create a longstanding RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
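# Illustrative stdlib analogue (not the RPC agent itself) of the timeout
# behaviour exercised above: waiting on a slow task with a short timeout
# raises, while waiting again without a timeout succeeds. The helper name,
# sleep length, and timeout values are arbitrary assumptions.
import concurrent.futures
import time
def _timeout_sketch():
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
        fut = pool.submit(time.sleep, 0.5)
        try:
            fut.result(timeout=0.01)  # too short -> concurrent.futures.TimeoutError
        except concurrent.futures.TimeoutError:
            pass
        fut.result()  # no timeout -> blocks until the sleep finishes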
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure the RPC runs to completion when no timeout is passed and the
# default RPC timeout is used.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
# Ensure the RPC runs to completion when no timeout is passed and the
# default RPC timeout is used.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
timed_out_rpc_event = None
@staticmethod
def timed_out_rpc():
RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_function_not_on_callee(self):
# Test that if a function does not exist on a callee, we don't crash;
# instead we get an error indicating that the func cannot be resolved.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
# Use delattr to remove the binding of a func on this node
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
# Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
RuntimeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# Create a reference cycle so that Python does not garbage collect a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
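# Create an RRef owned by the worker two ranks away, so that passing it to a
# different worker exercises user RRef forking and confirmation.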
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = []
for idx in range(num_cbs):
cb_futs.append(fut.then(partial(callback, idx)))
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
# There is no guarantee that the add_done_callback fn executes before the test finishes,
# so add a 'then' callback that runs afterwards and wait on it to ensure the first callback has run.
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
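# Dispatch a call to the given worker using rpc_sync, rpc_async, or remote,
# depending on the requested RPCExecMode, and return the result.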
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
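# Exercise async static, class, and bound methods of AsyncExecutionClass
# through the RRef proxy APIs (rpc_sync / rpc_async / remote) for the given mode.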
def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
@dist_init
def test_async_class_rref_proxy(self):
self._test_test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
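# Run an async UDF that applies `step` additions `num` times (chained or
# fanned out) and verify the accumulated result.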
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
@dist_init(setup_rpc=False)
def test_shutdown_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
if self.rank != 0:
og_func = rpc.api._broadcast_to_followers
og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs
# Monkey-patch _broadcast_to_followers to fail, which would ensure
# _all_gather on leader raises an exception.
def raise_error(sequence_id, objects_map):
og_func(sequence_id, objects_map)
raise RuntimeError('simulation')
# Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
# which would ensure barrier is not called on followers.
def rref_error():
raise RuntimeError('simulation rref')
try:
rpc.api._broadcast_to_followers = raise_error
rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
rpc.shutdown()
finally:
rpc.api._broadcast_to_followers = og_func
rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
else:
with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
rpc.shutdown()
dist.barrier()
class CudaRpcTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with _profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.cuda_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.cuda_time)
else:
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.cuda_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.cuda_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail the remote call message synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
# on the owning nodes, this is expected because the OwnerRRef was never
# successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 1 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
# NOTE: we only test this when not sending to self, as to_here() on self
# calls localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
# Builtin ops are sent as SCRIPT_CALL messages which, like the PYTHON_CALL
# messages used for Python UDFs, get a delay by default (see
# faulty_rpc_agent_test_fixture), so a 1 second timeout should trip.
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
class TensorPipeAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
# Test where we try to get the type of an RRef from its owner, but RRef
# creation is slower than timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Overloaded torch operator invoked from Python failed to many any schema"
):
rpc.rpc_sync(dst, torch.add, args=())
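# Verify that timeouts passed through the RRef proxy APIs are honored, both
# for a slow remote method and for an RRef whose creation itself is slow.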
def _test_rref_proxy_timeout(self, rref_proxy_api):
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
# Ensure RRef is created on remote node.
rref.to_here()
rref_api = getattr(rref, rref_proxy_api)
self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
expected_error = self.get_timeout_error_regex()
timeout = 2
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
if rref_api == rref.rpc_async:
result.wait()
elif rref_api == rref.remote:
result._get_future().wait()
# Case where rpc.remote() is stuck and exceeds timeout
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.01
rref_api = getattr(slow_rref, rref_proxy_api)
# Note that even when we call rref.rpc_async() in this case, we
# time out during future creation rather than while waiting on the future.
# This is because the RRef proxy function calls rref._get_type before
# returning the future, which blocks until the RRef is created on the
# owner node or the specified timeout expires.
with self.assertRaisesRegex(RuntimeError, expected_error):
rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
class MyConvNetForMNIST(nn.Module):
def __init__(self, device):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
self.device = device
def forward(self, x, is_rref=False):
x = x.to_here() if is_rref else x
with torch.cuda.stream(torch.cuda.current_stream(self.device)):
# intentionally adding delay to current CUDA stream
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
return self.net(x)
def __getstate__(self):
# return an empty dict to avoid inspecting the model contents on the
# owner
return {}
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture):
def _test_device_maps(self, options, errMsg):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(
options,
errMsg="Node worker0 has invalid target node names in its device maps"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(
options,
errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(
options,
errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(
options,
errMsg="Node worker0 has duplicated target devices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
x_device = "cpu" if x.device.type == "cpu" else x.device.index
y_device = "cpu" if y.device.type == "cpu" else y.device.index
if x_device == x_to and y_device == y_to:
return x.to(z_to) + y.to(z_to)
else:
raise ValueError("Wrong device affinity")
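# Configure the given device map, send x and y to `dst`, and verify that the
# result comes back on the local device that maps to z_to on the remote side.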
def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
x_to = device_map[x_from]
y_to = device_map[y_from]
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(x_from)
y = torch.ones(2).to(y_from)
ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
reverse_device_map = {device_map[k] : k for k in device_map}
z_from = reverse_device_map[z_to]
ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
self.assertEqual(ret_device, z_from)
self.assertEqual(ret, torch.ones(2).to(z_from))
rpc.shutdown()
def test_device_map_cpu(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to="cpu",
device_map={"cpu" : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_cpu_to_gpu_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=0,
device_map={"cpu" : 0},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_cpu_to_gpu_non_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=1,
device_map={"cpu" : 1},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_gpu_to_cpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to="cpu",
device_map={0 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_to_cpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to="cpu",
device_map={1 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=0,
device_map={0 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=1,
device_map={1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default_to_non_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=1,
device_map={0 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default_to_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=0,
device_map={1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
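# Expects inputs to arrive on cuda:1 and cuda:0 respectively (per the device
# map used below) and returns one result on each GPU.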
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
return x.to(0) + y, x - y.to(1)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(1)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(x, y)
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}}
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(0), torch.ones(2).to(1))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
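# Map each local GPU i to remote GPU (i + 1) % 4 and verify that the four
# results returned from different remote GPUs land on the expected local devices.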
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
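# Sending a GPU tensor without configuring a device map should fail with a
# clear error rather than hanging, and subsequent CPU RPCs should still work.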
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here().device.index, 1)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
@staticmethod
def _slow_add_on_user_stream(x, y):
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
s0.wait_stream(s1)
z.record_stream(s0)
return z
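# Note: the helper above offloads the addition to a side stream: s1 first waits
# on the ambient stream s0, record_stream ties x and y to s1 so the caching
# allocator does not reuse their memory prematurely, the slow kernel and the add
# run on s1, and s0 then waits on s1 so the result is safe to consume on the
# ambient stream.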
def _test_custom_stream(self, fn, device_map):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
futs = []
for i in range(20):
x = torch.ones(2, 2).to(0) * i
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
)
for i in range(20):
self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
self._test_custom_stream(
self._test_stream_multi_async,
{"cuda:0": "cuda:1"}
)
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, y)
)
return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
def _test_stream_nested_sync(self, dst):
x = torch.ones(2, 2).to(0)
y = torch.ones(2, 2).to(0) * 2
z = torch.ones(2, 2).to(0) * 3
nested_dst = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
self.assertEqual(ret, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
self._test_custom_stream(
self._test_stream_nested_sync,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
def _test_stream_nested_multi_async(self, dst):
if self.rank == 0:
futs = []
n = 5
xs, ys, zs = [], [], []
for i in range(n):
x = torch.ones(2, 2).to(0) * (i - 1)
y = torch.ones(2, 2).to(0) * i
z = torch.ones(2, 2).to(0) * (i + 1)
xs.append(x)
ys.append(y)
zs.append(z)
nested_dst = worker_name((self.rank + 2) % self.world_size)
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
)
for i in range(n):
self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
self._test_custom_stream(
self._test_stream_nested_multi_async,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
@staticmethod
def _gpu_add_wrong_gpus(x, y):
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device : remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN of MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
x = torch.randn(200, 1, 28, 28).to(local_device)
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
self,
local_device,
remote_device,
devicesOptions=None
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device: remote_device})
input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
options.set_device_map(input_src, {remote_device: local_device})
if devicesOptions is not None:
options.set_devices(devicesOptions[self.rank])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN of MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
actual = rref.remote().forward(rref_x, True).to_here()
expected = rref.rpc_sync().forward(rref_x, True)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
self._test_rref_as_arg_synchronization(
"cuda:0",
"cuda:0",
[["cuda:0"] for _ in range(4)], # devicesOptions
)
@staticmethod
def _rref_relay(rref):
return rref.to_here()
def _test_rref_forward_synchronization(self, local_device, remote_device):
options = self.rpc_backend_options
input_src = worker_name(0)
model_dst = worker_name(1)
out_relay = worker_name(2)
if self.rank == 0:
# for 1) model construction 2) forward execution
options.set_device_map(model_dst, {local_device: remote_device})
# Forward output will be first copied to the relay node before
# returning to the worker. This is intentional, to test RRef
# forward CUDA stream synchronizations.
options.set_device_map(out_relay, {local_device: local_device})
elif self.rank == 1:
# worker1 hosts the model and runs forward. The forward function
# calls RRef.to_here(), hence it needs to configure the device map
options.set_device_map(input_src, {remote_device: local_device})
elif self.rank == 2:
# worker2 will get the out RRef and call to_here(), and hence needs
# to configure the device map.
options.set_device_map(model_dst, {local_device: remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN of MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
rref_out = rref.remote().forward(rref_input, True)
out = rpc.remote(
out_relay,
TensorPipeAgentCudaRpcTest._rref_relay,
args=(rref_out,)
).to_here()
expected = rref.rpc_sync().forward(rref_input, True)
self.assertEqual(out, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:1")
def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
if self.rank == 0:
options = self.rpc_backend_options
options.set_device_map("w0", {local_device: remote_device})
rpc.init_rpc(
"w0",
rank=0,
world_size=1,
rpc_backend_options=options
)
model = rpc.remote(
"w0", torch.nn.Linear, (2048, 20000)
).remote().to(remote_device)
for _ in range(30):
data = torch.rand(2048, 2048).to(local_device)
output = model.rpc_sync().forward(data)
# to_here() internally calls localValue as the caller is
# the owner of the RRef.
v0 = rpc.RRef(output).remote().sum().to_here().item()
v1 = output.sum().item()
self.assertEqual(v0, v1)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
@staticmethod
def _return_tensor_view(i):
x = torch.ones(1000, 200).cuda(0) * i
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
# serialization of the return value will create a new tensor from the
# view, which is done outside of the user function.
return x.split(100)[0]
@skip_if_lt_x_gpu(1)
def test_tensor_view_as_return_value(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
futs = []
for i in range(5):
futs.append(rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._return_tensor_view,
args=(i,)
))
for i in range(5):
self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected source devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
options.set_devices([1])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch_reverse(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected target devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
device_maps={dst: {0 : 1}},
devices=[0]
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_int(self):
fut = Future(devices=[0])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_str(self):
fut = Future(devices=["cuda:0"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_device(self):
fut = Future(devices=[torch.device("cuda", 0)])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_not_cuda(self):
with self.assertRaisesRegex(
ValueError, "Expected devices to have indices, got cpu"
):
fut = Future(devices=["cpu"])
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
# We check proper CUDA stream synchronization by adding to the tensor
# in one stream to get the expected value, and reading it from another stream.
future = Future(devices=["cuda:0"])
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
another_stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
if sparse_tensor:
tensor = build_sparse_tensor().to("cuda:0")
add_tensor = build_sparse_tensor().to("cuda:0")
expected_tensor = (tensor + add_tensor).coalesce()
else:
tensor = torch.zeros((100,), device="cuda:0")
add_tensor = torch.ones((100,), device="cuda:0")
expected_tensor = tensor + add_tensor
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor += add_tensor
if sparse_tensor:
tensor = tensor.coalesce()
future.set_result(wrapper(tensor))
with torch.cuda.stream(another_stream):
tensor = unwrapper(future.wait())
if sparse_tensor:
self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
self.assertEqual(tensor.size(), expected_tensor.size())
else:
self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=True
)
@skip_if_lt_x_gpu(2)
def test_cuda_future_callback_changes_devices(self):
# We check proper CUDA stream synchronization by filling the tensor with
# the expected value in one stream, and reading it from another stream.
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:0", "cuda:1"])
def cb(fut):
t0 = fut.value()
tensor1.copy_(t0, non_blocking=True)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(2)
def test_cuda_future_value_on_bad_device(self):
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:1"])
# As a plus, we test that futures still invoke callbacks even in case of
# error, and that the child futures are successful if those callbacks
# don't access the parent future.
def cb(fut):
with torch.cuda.device("cuda:1"):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor1.fill_(1)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with self.assertRaisesRegex(
ValueError,
r"The result contained tensors residing on device\(s\) cuda:0 "
r"which are not among the expected device\(s\) cuda:1",
):
parent_future.wait()
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_async_execution_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t = torch.zeros((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 1).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_async_execution_nested_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
nested_dst = worker_name((self.rank + 2) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
a = torch.ones((100,), device="cuda:0")
b = torch.ones((100,), device="cuda:0")
c = torch.ones((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 3).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
tensor = torch.zeros((100,), device="cuda:0")
future = Future(devices=["cuda:0"])
future.set_result(tensor)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
tensor.fill_(1)
future.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
tensor_list = [torch.zeros((100,), device="cuda:0")]
future = Future(devices=["cuda:0"])
future.set_result(tensor_list)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
# We set things up so that the original tensor contained in the list
# gets deleted once we replace it with the other one. This will
# invalidate any cached information held by the future.
tensor_list[0] = torch.ones((100,), device="cuda:0")
future.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
rref.rpc_sync().increase(1)
ret = rref.rpc_sync().sum()
self.assertEqual(ret, 42)
rpc.shutdown()
|
the-stack_0_8332 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from migrate.versioning import api as versioning_api
# See LP bug #719834. sqlalchemy-migrate changed location of
# exceptions.py after 0.6.0.
try:
from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
from migrate import exceptions as versioning_exceptions
from melange.common import exception
logger = logging.getLogger('melange.db.migration')
def db_version(options, repo_path=None):
"""Return the database's current migration number.
:param options: options dict
:retval version number
"""
repo_path = get_migrate_repo_path(repo_path)
sql_connection = options['sql_connection']
try:
return versioning_api.db_version(sql_connection, repo_path)
except versioning_exceptions.DatabaseNotControlledError:
msg = ("database '%(sql_connection)s' is not under migration control"
% locals())
raise exception.DatabaseMigrationError(msg)
def upgrade(options, version=None, repo_path=None):
"""Upgrade the database's current migration level.
:param options: options dict
:param version: version to upgrade (defaults to latest)
:retval version number
"""
db_version(options, repo_path) # Ensure db is under migration control
repo_path = get_migrate_repo_path(repo_path)
sql_connection = options['sql_connection']
version_str = version or 'latest'
logger.info("Upgrading %(sql_connection)s to version %(version_str)s" %
locals())
return versioning_api.upgrade(sql_connection, repo_path, version)
def downgrade(options, version, repo_path=None):
"""Downgrade the database's current migration level.
:param options: options dict
:param version: version to downgrade to
:retval version number
"""
db_version(options, repo_path) # Ensure db is under migration control
repo_path = get_migrate_repo_path(repo_path)
sql_connection = options['sql_connection']
logger.info("Downgrading %(sql_connection)s to version %(version)s" %
locals())
return versioning_api.downgrade(sql_connection, repo_path, version)
def version_control(options, repo_path=None):
"""Place a database under migration control.
:param options: options dict
"""
sql_connection = options['sql_connection']
try:
_version_control(options, repo_path)
except versioning_exceptions.DatabaseAlreadyControlledError:
msg = ("database '%(sql_connection)s' is already under migration "
"control" % locals())
raise exception.DatabaseMigrationError(msg)
def _version_control(options, repo_path):
"""Place a database under migration control.
:param options: options dict
"""
repo_path = get_migrate_repo_path(repo_path)
sql_connection = options['sql_connection']
return versioning_api.version_control(sql_connection, repo_path)
def db_sync(options, version=None, repo_path=None):
"""Place a database under migration control and perform an upgrade.
:param options: options dict
:param repo_path: used for plugin db migrations, defaults to main repo
:retval version number
"""
try:
_version_control(options, repo_path)
except versioning_exceptions.DatabaseAlreadyControlledError:
pass
upgrade(options, version=version, repo_path=repo_path)
def get_migrate_repo_path(repo_path=None):
"""Get the path for the migrate repository."""
default_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
repo_path = repo_path or default_path
assert os.path.exists(repo_path)
return repo_path
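# Hedged usage sketch (illustrative only; the sqlite URL is a placeholder):
# place a database under migration control if needed, upgrade it to the latest
# revision, then report the resulting version.
if __name__ == '__main__':
    example_options = {'sql_connection': 'sqlite:///melange_example.sqlite'}
    db_sync(example_options)
    print('current migration version: %s' % db_version(example_options))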
|
the-stack_0_8336 | import os
import time
import datetime
import tensorflow as tf
import numpy as np
import data_utils as utils
from tensorflow.contrib import learn
from text_cnn import TextCNN
from data_utils import IMDBDataset
import argparse
import pandas as pd
import pickle
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
def get_args_from_command_line():
"""Parse the command line arguments."""
parser = argparse.ArgumentParser()
# necessary
parser.add_argument("--checkpoint_dir", type=str, help="Path to the models", default="/home/manuto/Documents/world_bank/bert_twitter_labor/code/glove-text-cnn/runs/default_run_name/checkpoints")
parser.add_argument("--eval_data_path", type=str, help="Path to the evaluation data. Must be in csv format.", default="/home/manuto/Documents/world_bank/bert_twitter_labor/code/twitter/data/may20_9Klabels/data_binary_pos_neg_balanced")
parser.add_argument("--vocab_path", type=str, help="Path pickle file.", default="/home/manuto/Documents/world_bank/bert_twitter_labor/data/glove_embeddings/vocab.pckl")
parser.add_argument("--preprocessing", default=False, type=bool)
args = parser.parse_args()
return args
def prepare_filepath_for_storing_pred(eval_data_path: str) -> str:
path_to_store_pred = os.path.join(os.path.dirname(eval_data_path), 'glove_predictions')
if not os.path.exists(path_to_store_pred):
os.makedirs(path_to_store_pred)
return path_to_store_pred
def tokenizer(text):
return [wdict.get(w.lower(), 0) for w in text.split(' ')]
def pad_dataset(dataset, maxlen):
return np.array(
[np.pad(r, (0, maxlen - len(r)), mode='constant') if len(r) < maxlen else np.array(r[:maxlen])
for r in dataset])
def create_label(label):
if label == 1:
return [0, 1]
elif label == 0:
return [1, 0]
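# Hedged illustration of the helpers above (token ids are invented; real values
# depend on the vocabulary loaded into `wdict` below):
#   tokenizer("great movie")      -> e.g. [512, 87]
#   pad_dataset([[512, 87]], 4)   -> array([[512, 87, 0, 0]])
#   create_label(1)               -> [0, 1]   (one-hot for the positive class)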
args = get_args_from_command_line()
print ("Intialising test parameters ...")
batch_size = 64
# Checkpoint directory from training run
checkpoint_dir = args.checkpoint_dir
# Evaluate on all training data
eval_train = False
# Misc Parameters
allow_soft_placement = True
log_device_placement = False
print ("Loading test data ...")
eval_df = pd.read_csv(args.eval_data_path, lineterminator='\n')
with open(args.vocab_path, 'rb') as dfile:
wdict = pickle.load(dfile)
#Preprocessing
text_processor = TextPreProcessor(
# terms that will be normalized
normalize=['url', 'email', 'percent', 'money', 'phone', 'user',
'time', 'url', 'date', 'number'],
# terms that will be annotated
annotate={"hashtag", "allcaps", "elongated", "repeated",
'emphasis', 'censored'},
fix_html=True, # fix HTML tokens
# corpus from which the word statistics are going to be used
# for word segmentation
segmenter="twitter",
# corpus from which the word statistics are going to be used
# for spell correction
corrector="twitter",
unpack_hashtags=True, # perform word segmentation on hashtags
unpack_contractions=True, # Unpack contractions (can't -> can not)
spell_correct_elong=False, # spell correction for elongated words
# select a tokenizer. You can use SocialTokenizer, or pass your own
# the tokenizer, should take as input a string and return a list of tokens
tokenizer=SocialTokenizer(lowercase=True).tokenize,
# list of dictionaries, for replacing tokens extracted from the text,
# with other expressions. You can pass more than one dictionaries.
dicts=[emoticons]
)
def ekphrasis_preprocessing(tweet):
return " ".join(text_processor.pre_process_doc(tweet))
if args.preprocessing:
eval_df['text_preprocessed'] = eval_df['text'].apply(ekphrasis_preprocessing)
print("*********Text has been preprocessed*********")
eval_df = eval_df[eval_df['text_preprocessed'].apply(lambda x: isinstance(x, str))]
text_tokenized = eval_df['text_preprocessed'].apply(tokenizer)
else:
eval_df = eval_df[eval_df['text'].apply(lambda x: isinstance(x, str))]
text_tokenized = eval_df['text'].apply(tokenizer)
x_test = pad_dataset(text_tokenized.values.tolist(), 128)
#y_test = np.array((eval_df['class'].apply(create_label)).values.tolist())
# Evaluation
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=allow_soft_placement,
log_device_placement=log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
predictions_proba = graph.get_operation_by_name("output/predictions_proba").outputs[0]
#predictions_proba = predictions_proba[:, 1]
# Generate batches for one epoch
batches = utils.batch_iter(list(x_test), batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
all_predictions_proba = []
for x_test_batch in batches:
batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
batch_predictions_proba = sess.run(predictions_proba, {input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions])
all_predictions_proba = np.concatenate([all_predictions_proba, batch_predictions_proba[:, 1]])
# Print accuracy if y_test is defined
if all_predictions is not None:
print("Predictions done")
#y_test = [col[1] for col in y_test]
#correct_predictions = float(sum(all_predictions == y_test))
print("Total number of test examples: {}".format(len(all_predictions_proba)))
eval_df['glove_cnn_class_pred'] = all_predictions_proba
#print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))
output_dir = prepare_filepath_for_storing_pred(args.eval_data_path)
output_path = os.path.join(output_dir, os.path.split(args.eval_data_path)[1])
eval_df.to_csv(output_path, index=False)
print("Predictions saved at:", output_path) |
the-stack_0_8337 | import numpy as np
import matplotlib.pyplot as plt
import quantities as pq
import neo
import pandas as pd
import string
import glob
import sys
def read_murali_csv(fileName):
MEA_data = pd.read_csv(fileName , sep=',', encoding='latin1', skiprows = 6)
data = {}
row = 4
end = 10
letters = [letter for letter in string.ascii_uppercase]
letters.remove('I')
letter = 0 #start with A
for j in range(0, 12):
for i in range(row, end):
name = str(letters[letter]) + str(i)
data[name] = []
if (j < 3): row -= 1
elif (j > 2) and (j < 8): row += 0
elif (j > 7): row +=1
if (j < 3): end += 1
elif (j > 2) and (j < 8): end += 0
elif (j > 7): end -=1
letter += 1
#print(data)
MEA_data = MEA_data.reindex(sorted(MEA_data.columns), axis=1)
MEA_data.rename(columns=lambda x: x.split(' ')[0], inplace=True)
MEA_data_full = pd.DataFrame.from_dict(data)
for col in MEA_data_full.columns:
try:
MEA_data_full[col] = MEA_data[col]
except:
pass
#print(MEA_data_full)
MEA_data_full = MEA_data_full.div(1000000)
MEA_data_full = MEA_data_full.reindex(sorted(MEA_data_full.columns), axis=1)
MEA_data_full.replace(np.nan, -1, inplace=True)
del MEA_data
return MEA_data_full
def csv_to_raster(fileName, title, output):
MEA_data_full = read_murali_csv(fileName)
spikeTrainArray = []
t_start = 540
t_stop = 600
for col in MEA_data_full.columns:
values = MEA_data_full[col].values
values = values[values > t_start]
values = values[values < t_stop]
spikeTrainArray.append(neo.core.SpikeTrain(values * pq.s, t_stop = t_stop * pq.s, t_start = t_start * pq.s))
#spikeTrainArray.append(MEA_data_full[col].values)
for i, spiketrain in enumerate(spikeTrainArray):
plt.plot(spiketrain, i * np.ones_like(spiketrain), 'k|', markersize=2)
plt.axis('tight')
plt.title("Raster Plot - "+title)
plt.xlim(t_start, t_stop)
plt.ylim(-5, 125)
plt.xlabel('Time (s)', fontsize=16)
plt.ylabel('Channels', fontsize=16)
plt.gca().tick_params(axis='both', which='major', labelsize=14)
#plt.show()
name = output+"\\"+title+"_Raster"+".jpg"
plt.savefig(name, dpi=600)
del MEA_data_full
del spikeTrainArray
plt.clf()
def main(directory, output):
directoryPath = directory + "\\*.csv"
print(directoryPath)
dirFiles = glob.glob(directoryPath)
titles = [os.path.splitext(os.path.basename(f))[0] for f in dirFiles]
print(titles)
for index, fileName in enumerate(dirFiles):
print(index, fileName)
title = titles[index]
try:
csv_to_raster(fileName, title, output)
except Exception as e:
print(f"an error occurred ({e}). stopped on: {fileName}")
if __name__ == '__main__':
#accepts two cmd line arguments, input directory and output directory (no \ at the end of paths)
print(f"Arguments count: {len(sys.argv)}")
for i, arg in enumerate(sys.argv):
print(f"Argument {i:>6}: {arg}")
try:
if(len(sys.argv) < 2):
print("running with default location")
#First argument is directory of input .csv files. Second argument is directory to output raster plots
main("<Default input file directory for .csv spiking data>", "<Default output file directory for raster plots>")
elif(len(sys.argv) == 2):
main(sys.argv[1], sys.argv[1])
else:
main(sys.argv[1], sys.argv[2])
except IndexError:
print("no files in directory")
except Exception as e:
print(f"something went wrong: {e}")
#main()
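# Hedged usage sketch (paths and script name are placeholders, not from the
# original project):
#   python raster_from_csv.py "C:\data\mea_csvs" "C:\data\raster_plots"
# Every *.csv in the input directory is rendered as a raster plot of the
# 540-600 s window and saved as "<filename>_Raster.jpg" in the output directory.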
|
the-stack_0_8339 | from flask import Flask, render_template, redirect, url_for
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField
from wtforms.validators import DataRequired, URL
import csv
app = Flask(__name__)
app.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'
Bootstrap(app)
class CafeForm(FlaskForm):
cafe = StringField('Cafe name', validators=[DataRequired()])
location = StringField("Cafe Location on Google Maps (URL)", validators=[DataRequired(), URL()])
open = StringField("Opening Time e.g. 8AM", validators=[DataRequired()])
close = StringField("Closing Time e.g. 5:30PM", validators=[DataRequired()])
coffee_rating = SelectField("Coffee Rating", choices=["☕️", "☕☕", "☕☕☕", "☕☕☕☕", "☕☕☕☕☕"], validators=[DataRequired()])
wifi_rating = SelectField("Wifi Strength Rating", choices=["✘", "💪", "💪💪", "💪💪💪", "💪💪💪💪", "💪💪💪💪💪"], validators=[DataRequired()])
power_rating = SelectField("Power Socket Availability", choices=["✘", "🔌", "🔌🔌", "🔌🔌🔌", "🔌🔌🔌🔌", "🔌🔌🔌🔌🔌"], validators=[DataRequired()])
submit = SubmitField('Submit')
@app.route("/")
def home():
return render_template("index.html")
@app.route('/add', methods=["GET", "POST"])
def add_cafe():
form = CafeForm()
if form.validate_on_submit():
with open("cafe-data.csv", mode="a") as csv_file:
csv_file.write(f"\n{form.cafe.data},"
f"{form.location.data},"
f"{form.open.data},"
f"{form.close.data},"
f"{form.coffee_rating.data},"
f"{form.wifi_rating.data},"
f"{form.power_rating.data}")
return redirect(url_for('cafes'))
return render_template('add.html', form=form)
@app.route('/cafes')
def cafes():
with open('cafe-data.csv', encoding='utf-8', newline='') as csv_file:
csv_data = csv.reader(csv_file, delimiter=',')
list_of_rows = []
for row in csv_data:
list_of_rows.append(row)
return render_template('cafes.html', cafes=list_of_rows)
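# Hedged note: add_cafe() appends rows in the field order of the f-string above,
# e.g. (values invented for illustration):
#   Lighthouse,https://goo.gl/maps/example,8AM,5:30PM,☕☕☕,💪💪,🔌🔌
# cafes() re-reads the file with csv.reader and hands the raw rows (including
# any header row present in cafe-data.csv) straight to the template.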
if __name__ == '__main__':
app.run(debug=True)
|
the-stack_0_8341 | """ResNet handler.
Adapted from
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
Two primary changes from original ResNet code:
1) Tapped delay line op is added to the output of every residual computation
- See project.models.layers & project.models.tdl
2) The timestep is set on the TDL in the forward pass
"""
import functools
import numpy as np
import torch.nn as nn
from collections import OrderedDict
from torchvision.models.utils import load_state_dict_from_url
from models import custom_ops
from models import layers as res_layers
from models import model_utils
from models.internal_classifiers import InternalClassifier
_MODEL_URLS = {
"resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
"resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
"resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
"resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
"resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
"resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
"resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
"wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
"wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
class ResNet(nn.Module):
"""Resnet base class."""
def __init__(self, name, block, layers, num_classes, **kwargs):
"""Initialize resnet."""
super(ResNet, self).__init__()
self.name = name
self._layers_arch = layers
self._num_classes = num_classes
self._train_mode = kwargs.get("train_mode", "baseline")
self._sdn_train_mode = self._train_mode in ["sdn", "ic_only"]
self._cascaded = kwargs.get("cascaded", False)
self._cascaded_scheme = kwargs.get("cascaded_scheme", "parallel")
# Set multiple FCs flag
self._multiple_fcs = kwargs.get("multiple_fcs", False)
self._multiple_fcs = not self._sdn_train_mode and self._multiple_fcs
if self._train_mode == "baseline":
self._time_bn = False
else:
self._time_bn = kwargs["bn_opts"]["temporal_stats"]
# Set up batch norm operation
self._norm_layer_op = self._setup_bn_op(**kwargs)
# Head layer
self.res_layer_count = 0
self.inplanes = 64
self.layer0 = res_layers.HeadLayer(
self.res_layer_count,
self.inplanes,
self._norm_layer_op,
time_bn=self._time_bn,
IC_active=self._sdn_train_mode,
num_classes=self._num_classes,
**kwargs,
)
self.res_layer_count += 1
# Residual Layers
self.layer1 = self._make_layer(block, 64, layers[0], **kwargs)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, **kwargs)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, **kwargs)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
final_layer=True, **kwargs)
self.layers = [self.layer1, self.layer2, self.layer3, self.layer4]
if self._multiple_fcs:
fcs = []
for i in range(self.timesteps):
fc_i = InternalClassifier(
n_channels=512,
num_classes=num_classes,
block_expansion=block.expansion,
)
fcs.append(fc_i)
self.fcs = nn.ModuleList(fcs)
else:
self.fc = InternalClassifier(
n_channels=512,
num_classes=num_classes,
block_expansion=block.expansion,
)
# Weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (self._norm_layer, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _setup_bn_op(self, **kwargs):
if self._cascaded and self._time_bn:
print("BatchNorm OP: Temporal")
self._norm_layer = custom_ops.BatchNorm2d
# Setup batchnorm opts
self.bn_opts = kwargs["bn_opts"]
self.bn_opts["n_timesteps"] = self.timesteps
norm_layer_op = functools.partial(self._norm_layer, self.bn_opts)
else:
print("BatchNorm OP: Standard")
self._norm_layer = nn.BatchNorm2d
norm_layer_op = self._norm_layer
return norm_layer_op
def _make_layer(self, block, planes, blocks,
stride=1, final_layer=False, **kwargs):
tdl_mode = kwargs.get("tdl_mode", "OSD")
tdl_alpha = kwargs.get("tdl_alpha", 0.0)
noise_var = kwargs.get("noise_var", 0.0)
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
custom_ops.conv1x1(self.inplanes, planes * block.expansion, stride),
)
layers = []
layers.append(
block(
self.res_layer_count,
self.inplanes,
planes,
stride,
downsample,
self._norm_layer_op,
tdl_alpha=tdl_alpha,
tdl_mode=tdl_mode,
noise_var=noise_var,
cascaded=self._cascaded,
cascaded_scheme=self._cascaded_scheme,
time_bn=self._time_bn,
num_classes=self._num_classes
)
)
self.res_layer_count += 1
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.res_layer_count,
self.inplanes,
planes,
norm_layer=self._norm_layer_op,
tdl_alpha=tdl_alpha,
tdl_mode=tdl_mode,
noise_var=noise_var,
cascaded=self._cascaded,
cascaded_scheme=self._cascaded_scheme,
time_bn=self._time_bn,
num_classes=self._num_classes
)
)
self.res_layer_count += 1
return nn.Sequential(*layers)
@property
def timesteps(self):
if self._cascaded:
n_timesteps = np.sum(self._layers_arch) + 1
else:
n_timesteps = 1
return n_timesteps
def _set_time(self, t):
self.layer0.set_time(t)
for layer in self.layers:
for block in layer:
block.set_time(t)
def set_target_inference_costs(
self,
normed_flops,
target_inference_costs,
use_all=False
):
if use_all:
print("Using all ICs!")
selected_ICs = list(range(len(normed_flops)-1))
IC_costs = normed_flops
else:
selected_ICs = []
IC_costs = []
for target_cost in target_inference_costs:
diffs = []
for normed_flop in normed_flops:
abs_diff = np.abs(target_cost - normed_flop)
diffs.append(abs_diff)
min_idx = np.argmin(diffs)
IC_cost = normed_flops[min_idx]
IC_costs.append(IC_cost)
selected_ICs.append(min_idx)
self.selected_ICs = np.array(selected_ICs)
self.IC_costs = np.array(IC_costs)
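# Hedged example of the selection above (numbers are illustrative):
#   normed_flops = [0.2, 0.5, 0.8, 1.0], target_inference_costs = [0.25, 0.75]
#   -> selected_ICs = [0, 2] and IC_costs = [0.2, 0.8], i.e. the internal
#      classifiers whose normalized FLOP cost is closest to each target.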
def turn_off_IC(self):
for k, params in self.named_parameters():
if "IC" in k and "final" not in k:
params.requires_grad = False
def freeze_backbone(self, verbose=False):
print("Freezing backbone param...")
self.frozen_params = []
self.unfrozen_params = []
for k, params in self.named_parameters():
if "IC" not in k:
self.frozen_params.append(k)
if verbose:
print(f"\t{k} [frozen]")
params.requires_grad = False
else:
self.unfrozen_params.append(k)
def _forward(self, x, t=0):
# Set time on all blocks
if self._cascaded:
self._set_time(t)
# Head layer
out = self.layer0(x)
# Res Layers
for layer in self.layers:
out = layer(out)
# Final layer
if self._multiple_fcs:
out = self.fcs[t](out)
else:
out = self.fc(out)
return out
def forward(self, x, t=0):
return self._forward(x, t)
def make_resnet(arch, block, layers, pretrained, **kwargs):
if kwargs.get("imagenet_pretrained", False):
assert arch in _MODEL_URLS, f"{arch} not found in _MODEL_URLS"
# Save specified num_classes and switch to imagenet # classes
num_classes = kwargs["num_classes"]
kwargs["num_classes"] = 1000
# Load model
model = ResNet(arch, block, layers, **kwargs)
# Load imagenet state dict
state_dict = load_state_dict_from_url(_MODEL_URLS[arch])
# Adjust names from loaded state_dict to match our model
new_dict = OrderedDict()
for k, v in state_dict.items():
if ".0.downsample.1" in k:
continue
# Prepend layer0 to head layer to match our code
if k.startswith("conv1") or k.startswith("bn1"):
k = f"layer0.{k}"
# Fix fc.fc missing weight
if k == "fc.weight":
k = f"fc.{k}"
if k == "fc.bias":
k = f"fc.{k}"
# Inflate batch norm along time dimension if cascaded model
if kwargs["cascaded"] and kwargs["bn_opts"]["temporal_stats"] and "running_" in k:
v = v.unsqueeze(dim=0).repeat(model.timesteps, 1)
# print(f"Inflating {k} to new shape: {v.shape}")
# Update dict
new_dict[k] = v
# Load imagenet state dict into our model
model.load_state_dict(new_dict)
print("Success: Loaded pretrained state dict!")
# Replace final layer to correct # class mapping
num_ftrs = model.fc.in_features
model.fc = InternalClassifier(num_ftrs, num_classes) # nn.Linear(num_ftrs, num_classes)
else:
model = ResNet(arch, block, layers, **kwargs)
if pretrained:
model = model_utils.load_model(model, kwargs)
return model
def resnet18(pretrained=False, **kwargs):
return make_resnet(
"resnet18",
res_layers.BasicBlock,
[2, 2, 2, 2],
pretrained,
**kwargs,
)
def resnet34(pretrained=False, **kwargs):
return make_resnet(
"resnet34",
res_layers.BasicBlock,
[3, 4, 6, 3],
pretrained,
**kwargs,
)
def resnet50(pretrained=False, **kwargs):
return make_resnet(
"resnet50",
res_layers.Bottleneck,
[3, 4, 6, 3],
pretrained,
**kwargs,
)
def resnet101(pretrained=False, **kwargs):
return make_resnet(
"resnet101",
res_layers.Bottleneck,
[3, 4, 23, 3],
pretrained,
**kwargs,
)
def resnet152(pretrained=False, **kwargs):
return make_resnet(
"resnet152",
res_layers.Bottleneck,
[3, 8, 36, 3],
pretrained,
**kwargs,
)
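# Hedged usage sketch (not part of the original module; input size, class count
# and kwargs values are assumptions based on the kwargs.get(...) names above).
# A cascaded model is driven by feeding the same input once per timestep, with
# `t` indexing the TDL / batch-norm state inside forward().
def _example_cascaded_forward():
    import torch
    model = resnet18(
        num_classes=10,
        train_mode="baseline",
        cascaded=True,
        cascaded_scheme="parallel",
        tdl_mode="OSD",
        tdl_alpha=0.0,
        noise_var=0.0,
        multiple_fcs=False,
    )
    x = torch.randn(2, 3, 32, 32)
    return [model(x, t=t) for t in range(model.timesteps)]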
|
the-stack_0_8342 | import asyncio
from dataclasses import dataclass
from magda.module import Module
from magda.decorators import register, accept, finalize, produce
from magda.utils.logger.logger import MagdaLogger
from examples.interfaces.common import Context
from examples.interfaces.fn import LambdaInterface
@accept(LambdaInterface)
@produce(LambdaInterface)
@register('ModuleD')
@finalize
class ModuleD(Module.Runtime):
SLEEP_TIME = 1
@dataclass
class Parameters:
threshold: float
def bootstrap(self, logger: MagdaLogger):
ctx: Context = self.context
params = self.Parameters(**self.parameters)
logger.info(f'Context.timer = {ctx.timer} | Threshold = {params.threshold}')
async def run(self, data: Module.ResultSet, *args, **kwargs):
# Access strings (results) from the previous modules
src = [text.fn() for text in data.of(LambdaInterface)]
# E.g. some IO operation (delay for example purposes)
await asyncio.sleep(self.SLEEP_TIME)
# Build output string and produce declared interface
msg = '(' + ' + '.join(src) + (' = ' if len(src) else '') + f'{self.name})'
return LambdaInterface(lambda: msg)
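# Hedged note: with two upstream LambdaInterface results producing "A" and "B",
# run() above returns a LambdaInterface whose fn() yields "(A + B = <name>)"
# (where <name> is this module's configured name) after the ~1 s simulated
# IO delay.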
|
the-stack_0_8344 | import urllib.parse
import uuid
from abc import ABC
from typing import Any, Dict, List, Optional, Tuple, Union, cast
from django.utils import timezone
from rest_framework.exceptions import ValidationError
from ee.clickhouse.client import sync_execute
from ee.clickhouse.materialized_columns.columns import ColumnName
from ee.clickhouse.models.action import format_action_filter
from ee.clickhouse.models.property import (
box_value,
get_property_string_expr,
get_single_or_multi_property_string_expr,
parse_prop_grouped_clauses,
)
from ee.clickhouse.models.util import PersonPropertiesMode
from ee.clickhouse.queries.breakdown_props import format_breakdown_cohort_join_query, get_breakdown_prop_values
from ee.clickhouse.queries.funnels.funnel_event_query import FunnelEventQuery
from ee.clickhouse.sql.funnels.funnel import FUNNEL_INNER_EVENT_STEPS_QUERY
from posthog.constants import (
FUNNEL_WINDOW_INTERVAL,
FUNNEL_WINDOW_INTERVAL_UNIT,
LIMIT,
OFFSET,
TREND_FILTER_TYPE_ACTIONS,
)
from posthog.models import Entity, Filter, Team
from posthog.models.property import PropertyName
from posthog.utils import relative_date_parse
class ClickhouseFunnelBase(ABC):
_filter: Filter
_team: Team
_include_timestamp: Optional[bool]
_include_preceding_timestamp: Optional[bool]
_extra_event_fields: List[ColumnName]
_extra_event_properties: List[PropertyName]
def __init__(
self,
filter: Filter,
team: Team,
include_timestamp: Optional[bool] = None,
include_preceding_timestamp: Optional[bool] = None,
base_uri: str = "/",
) -> None:
self._filter = filter
self._team = team
self._base_uri = base_uri
self.params = {
"team_id": self._team.pk,
"events": [], # purely a speed optimization, don't need this for filtering
}
self._include_timestamp = include_timestamp
self._include_preceding_timestamp = include_preceding_timestamp
# handle default if window isn't provided
if not self._filter.funnel_window_days and not self._filter.funnel_window_interval:
self._filter = self._filter.with_data({FUNNEL_WINDOW_INTERVAL: 14, FUNNEL_WINDOW_INTERVAL_UNIT: "day"})
if self._filter.funnel_window_days:
self._filter = self._filter.with_data(
{FUNNEL_WINDOW_INTERVAL: self._filter.funnel_window_days, FUNNEL_WINDOW_INTERVAL_UNIT: "day"}
)
if not self._filter.limit:
new_limit = {LIMIT: 100}
self._filter = self._filter.with_data(new_limit)
self.params.update(new_limit)
else:
self.params.update({LIMIT: self._filter.limit})
self.params.update({OFFSET: self._filter.offset})
self._extra_event_fields: List[ColumnName] = []
self._extra_event_properties: List[PropertyName] = []
if self._filter.include_recordings:
self._extra_event_fields = ["uuid"]
self._extra_event_properties = ["$session_id", "$window_id"]
self._update_filters()
def run(self, *args, **kwargs):
if len(self._filter.entities) == 0:
return []
results = self._exec_query()
return self._format_results(results)
def _serialize_step(self, step: Entity, count: int, people: Optional[List[uuid.UUID]] = None) -> Dict[str, Any]:
if step.type == TREND_FILTER_TYPE_ACTIONS:
name = step.get_action().name
else:
name = step.id
return {
"action_id": step.id,
"name": name,
"custom_name": step.custom_name,
"order": step.order,
"people": people if people else [],
"count": count,
"type": step.type,
}
@property
def extra_event_fields_and_properties(self):
return self._extra_event_fields + self._extra_event_properties
def _update_filters(self):
# format default dates
data: Dict[str, Any] = {}
if not self._filter._date_from:
data.update({"date_from": relative_date_parse("-7d")})
if not self._filter._date_to:
data.update({"date_to": timezone.now()})
if self._filter.breakdown and not self._filter.breakdown_type:
data.update({"breakdown_type": "event"})
# the API accepts either:
# a string (single breakdown) in parameter "breakdown"
# a list of numbers (one or more cohorts) in parameter "breakdown"
# a list of strings (multiple breakdown) in parameter "breakdowns"
# if the breakdown is a string, box it as a list to reduce paths through the code
#
# The code below ensures that breakdown is always an array
# without it affecting the multiple areas of the code outside of funnels that use breakdown
#
# Once multi property breakdown is implemented in Trends this becomes unnecessary
if isinstance(self._filter.breakdowns, List) and self._filter.breakdown_type in ["person", "event", None]:
data.update({"breakdown": [b.get("property") for b in self._filter.breakdowns]})
if isinstance(self._filter.breakdown, str) and self._filter.breakdown_type in ["person", "event", None]:
boxed_breakdown: List[Union[str, int]] = box_value(self._filter.breakdown)
data.update({"breakdown": boxed_breakdown})
for exclusion in self._filter.exclusions:
if exclusion.funnel_from_step is None or exclusion.funnel_to_step is None:
raise ValidationError("Exclusion event needs to define funnel steps")
if exclusion.funnel_from_step >= exclusion.funnel_to_step:
raise ValidationError("Exclusion event range is invalid. End of range should be greater than start.")
if exclusion.funnel_from_step >= len(self._filter.entities) - 1:
raise ValidationError(
"Exclusion event range is invalid. Start of range is greater than number of steps."
)
if exclusion.funnel_to_step > len(self._filter.entities) - 1:
raise ValidationError("Exclusion event range is invalid. End of range is greater than number of steps.")
for entity in self._filter.entities[exclusion.funnel_from_step : exclusion.funnel_to_step + 1]:
if entity.equals(exclusion) or exclusion.is_superset(entity):
raise ValidationError("Exclusion event can't be the same as funnel step")
self._filter = self._filter.with_data(data)
def _format_single_funnel(self, results, with_breakdown=False):
# Format of this is [step order, person count (that reached that step), array of person uuids]
steps = []
total_people = 0
for step in reversed(self._filter.entities):
if results and len(results) > 0:
total_people += results[step.order]
serialized_result = self._serialize_step(step, total_people, []) # persons not needed on initial return
if cast(int, step.order) > 0:
serialized_result.update(
{
"average_conversion_time": results[cast(int, step.order) + len(self._filter.entities) - 1],
"median_conversion_time": results[cast(int, step.order) + len(self._filter.entities) * 2 - 2],
}
)
else:
serialized_result.update({"average_conversion_time": None, "median_conversion_time": None})
# Construct converted and dropped people urls. Previously this logic was
# part of
# https://github.com/PostHog/posthog/blob/e8d7b2fe6047f5b31f704572cd3bebadddf50e0f/frontend/src/scenes/insights/InsightTabs/FunnelTab/FunnelStepTable.tsx#L483:L483
funnel_step = step.index + 1
converted_people_filter = self._filter.with_data({"funnel_step": funnel_step})
dropped_people_filter = self._filter.with_data({"funnel_step": -funnel_step})
if with_breakdown:
breakdown = results[-1]
serialized_result.update({"breakdown": breakdown, "breakdown_value": breakdown})
# important to not try and modify this value any how - as these
# are keys for fetching persons
# Add in the breakdown to people urls as well
converted_people_filter = converted_people_filter.with_data({"funnel_step_breakdown": breakdown})
dropped_people_filter = dropped_people_filter.with_data({"funnel_step_breakdown": breakdown})
serialized_result.update(
{
"converted_people_url": f"{self._base_uri}api/person/funnel/?{urllib.parse.urlencode(converted_people_filter.to_params())}",
"dropped_people_url": (
f"{self._base_uri}api/person/funnel/?{urllib.parse.urlencode(dropped_people_filter.to_params())}"
# NOTE: If we are looking at the first step, there is no drop off,
# everyone converted, otherwise they would not have been
# included in the funnel.
if step.index > 0
else None
),
}
)
steps.append(serialized_result)
return steps[::-1] # reverse
def _format_results(self, results):
if not results or len(results) == 0:
return []
if self._filter.breakdown:
return [self._format_single_funnel(res, with_breakdown=True) for res in results]
else:
return self._format_single_funnel(results[0])
def _exec_query(self) -> List[Tuple]:
query = self.get_query()
return sync_execute(query, self.params)
def _get_timestamp_outer_select(self) -> str:
if self._include_preceding_timestamp:
return ", max_timestamp, min_timestamp"
elif self._include_timestamp:
return ", timestamp"
else:
return ""
def _get_timestamp_selects(self) -> Tuple[str, str]:
"""
Returns timestamp selectors for the target step and optionally the preceding step.
In the former case, always returns the timestamp for the first and last step as well.
"""
target_step = self._filter.funnel_step
final_step = len(self._filter.entities) - 1
first_step = 0
if not target_step:
return "", ""
if target_step < 0:
# the first valid dropoff argument for funnel_step is -2
# -2 refers to persons who performed the first step but never made it to the second
if target_step == -1:
raise ValueError("To request dropoff of initial step use -2")
target_step = abs(target_step) - 2
else:
target_step -= 1
if self._include_preceding_timestamp:
if target_step == 0:
raise ValueError("Cannot request preceding step timestamp if target funnel step is the first step")
return (
f", latest_{target_step}, latest_{target_step - 1}",
f", argMax(latest_{target_step}, steps) as max_timestamp, argMax(latest_{target_step - 1}, steps) as min_timestamp",
)
elif self._include_timestamp:
return (
f", latest_{target_step}, latest_{final_step}, latest_{first_step}",
f", argMax(latest_{target_step}, steps) as timestamp, argMax(latest_{final_step}, steps) as final_timestamp, argMax(latest_{first_step}, steps) as first_timestamp",
)
else:
return "", ""
def _get_step_times(self, max_steps: int):
conditions: List[str] = []
for i in range(1, max_steps):
conditions.append(
f"if(isNotNull(latest_{i}) AND latest_{i} <= latest_{i-1} + INTERVAL {self._filter.funnel_window_interval} {self._filter.funnel_window_interval_unit_ch()}, "
f"dateDiff('second', toDateTime(latest_{i - 1}), toDateTime(latest_{i})), NULL) step_{i}_conversion_time"
)
formatted = ", ".join(conditions)
return f", {formatted}" if formatted else ""
def _get_partition_cols(self, level_index: int, max_steps: int):
cols: List[str] = []
for i in range(0, max_steps):
cols.append(f"step_{i}")
if i < level_index:
cols.append(f"latest_{i}")
for field in self.extra_event_fields_and_properties:
cols.append(f'"{field}_{i}"')
for exclusion_id, exclusion in enumerate(self._filter.exclusions):
if cast(int, exclusion.funnel_from_step) + 1 == i:
cols.append(f"exclusion_{exclusion_id}_latest_{exclusion.funnel_from_step}")
else:
duplicate_event = 0
if i > 0 and (
self._filter.entities[i].equals(self._filter.entities[i - 1])
or self._filter.entities[i].is_superset(self._filter.entities[i - 1])
):
duplicate_event = 1
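# Explanatory note: when two consecutive steps match the same event, the window frame
# below ends at 1 PRECEDING instead of 0 PRECEDING, so the event row that already
# satisfied step i-1 is excluded and cannot be counted as satisfying step i as well.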
cols.append(
f"min(latest_{i}) over (PARTITION by aggregation_target {self._get_breakdown_prop()} ORDER BY timestamp DESC ROWS BETWEEN UNBOUNDED PRECEDING AND {duplicate_event} PRECEDING) latest_{i}"
)
for field in self.extra_event_fields_and_properties:
cols.append(
f'last_value("{field}_{i}") over (PARTITION by aggregation_target {self._get_breakdown_prop()} ORDER BY timestamp DESC ROWS BETWEEN UNBOUNDED PRECEDING AND {duplicate_event} PRECEDING) "{field}_{i}"'
)
for exclusion_id, exclusion in enumerate(self._filter.exclusions):
# exclusion starting at step i follows semantics of step i+1 in the query (since we're looking for exclusions after step i)
if cast(int, exclusion.funnel_from_step) + 1 == i:
cols.append(
f"min(exclusion_{exclusion_id}_latest_{exclusion.funnel_from_step}) over (PARTITION by aggregation_target {self._get_breakdown_prop()} ORDER BY timestamp DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 0 PRECEDING) exclusion_{exclusion_id}_latest_{exclusion.funnel_from_step}"
)
return ", ".join(cols)
def _get_exclusion_condition(self):
if not self._filter.exclusions:
return ""
conditions = []
for exclusion_id, exclusion in enumerate(self._filter.exclusions):
from_time = f"latest_{exclusion.funnel_from_step}"
to_time = f"latest_{exclusion.funnel_to_step}"
exclusion_time = f"exclusion_{exclusion_id}_latest_{exclusion.funnel_from_step}"
condition = (
f"if( {exclusion_time} > {from_time} AND {exclusion_time} < "
f"if(isNull({to_time}), {from_time} + INTERVAL {self._filter.funnel_window_interval} {self._filter.funnel_window_interval_unit_ch()}, {to_time}), 1, 0)"
)
conditions.append(condition)
if conditions:
return f", arraySum([{','.join(conditions)}]) as exclusion"
else:
return ""
def _get_sorting_condition(self, curr_index: int, max_steps: int):
if curr_index == 1:
return "1"
conditions: List[str] = []
for i in range(1, curr_index):
conditions.append(f"latest_{i - 1} < latest_{i }")
conditions.append(
f"latest_{i} <= latest_0 + INTERVAL {self._filter.funnel_window_interval} {self._filter.funnel_window_interval_unit_ch()}"
)
return f"if({' AND '.join(conditions)}, {curr_index}, {self._get_sorting_condition(curr_index - 1, max_steps)})"
def _get_inner_event_query(
self, entities=None, entity_name="events", skip_entity_filter=False, skip_step_filter=False
) -> str:
entities_to_use = entities or self._filter.entities
event_query, params = FunnelEventQuery(
filter=self._filter,
team=self._team,
extra_fields=self._extra_event_fields,
extra_event_properties=self._extra_event_properties,
).get_query(entities_to_use, entity_name, skip_entity_filter=skip_entity_filter)
self.params.update(params)
if skip_step_filter:
steps_conditions = "1=1"
else:
steps_conditions = self._get_steps_conditions(length=len(entities_to_use))
all_step_cols: List[str] = []
for index, entity in enumerate(entities_to_use):
step_cols = self._get_step_col(entity, index, entity_name)
all_step_cols.extend(step_cols)
for exclusion_id, entity in enumerate(self._filter.exclusions):
step_cols = self._get_step_col(entity, entity.funnel_from_step, entity_name, f"exclusion_{exclusion_id}_")
# every exclusion entity has the form: exclusion_<id>_step_i & timestamp exclusion_<id>_latest_i
# where i is the starting step for exclusion on that entity
all_step_cols.extend(step_cols)
steps = ", ".join(all_step_cols)
breakdown_select_prop = self._get_breakdown_select_prop()
if len(breakdown_select_prop) > 0:
select_prop = f", {breakdown_select_prop}"
else:
select_prop = ""
extra_join = ""
if self._filter.breakdown:
if self._filter.breakdown_type == "cohort":
extra_join = self._get_cohort_breakdown_join()
else:
values = self._get_breakdown_conditions()
self.params.update({"breakdown_values": values})
return FUNNEL_INNER_EVENT_STEPS_QUERY.format(
steps=steps,
event_query=event_query,
extra_join=extra_join,
steps_condition=steps_conditions,
select_prop=select_prop,
)
def _get_steps_conditions(self, length: int) -> str:
step_conditions: List[str] = []
for index in range(length):
step_conditions.append(f"step_{index} = 1")
for exclusion_id, entity in enumerate(self._filter.exclusions):
step_conditions.append(f"exclusion_{exclusion_id}_step_{entity.funnel_from_step} = 1")
return " OR ".join(step_conditions)
def _get_step_col(self, entity: Entity, index: int, entity_name: str, step_prefix: str = "") -> List[str]:
# step prefix is used to distinguish actual steps, and exclusion steps
# without the prefix, we get the same parameter binding for both, which borks things up
step_cols: List[str] = []
condition = self._build_step_query(entity, index, entity_name, step_prefix)
step_cols.append(f"if({condition}, 1, 0) as {step_prefix}step_{index}")
step_cols.append(f"if({step_prefix}step_{index} = 1, timestamp, null) as {step_prefix}latest_{index}")
for field in self.extra_event_fields_and_properties:
step_cols.append(f'if({step_prefix}step_{index} = 1, "{field}", null) as "{step_prefix}{field}_{index}"')
return step_cols
def _build_step_query(self, entity: Entity, index: int, entity_name: str, step_prefix: str) -> str:
filters = self._build_filters(entity, index)
if entity.type == TREND_FILTER_TYPE_ACTIONS:
action = entity.get_action()
for action_step in action.steps.all():
if entity_name not in self.params[entity_name]:
self.params[entity_name].append(action_step.event)
action_query, action_params = format_action_filter(
team_id=self._team.pk, action=action, prepend=f"{entity_name}_{step_prefix}step_{index}"
)
if action_query == "":
return ""
self.params.update(action_params)
content_sql = "{actions_query} {filters}".format(actions_query=action_query, filters=filters,)
else:
if entity.id not in self.params[entity_name]:
self.params[entity_name].append(entity.id)
event_param_key = f"{entity_name}_{step_prefix}event_{index}"
self.params[event_param_key] = entity.id
content_sql = f"event = %({event_param_key})s {filters}"
return content_sql
def _build_filters(self, entity: Entity, index: int) -> str:
prop_filters, prop_filter_params = parse_prop_grouped_clauses(
team_id=self._team.pk,
property_group=entity.property_groups,
prepend=str(index),
person_properties_mode=PersonPropertiesMode.USING_PERSON_PROPERTIES_COLUMN,
person_id_joined_alias="aggregation_target",
)
self.params.update(prop_filter_params)
return prop_filters
def _get_funnel_person_step_condition(self):
step_num = self._filter.funnel_step
custom_steps = self._filter.funnel_custom_steps
max_steps = len(self._filter.entities)
conditions = []
if custom_steps:
self.params.update({"custom_step_num": custom_steps})
conditions.append("steps IN %(custom_step_num)s")
elif step_num is not None:
if step_num >= 0:
self.params.update({"step_num": [i for i in range(step_num, max_steps + 1)]})
conditions.append("steps IN %(step_num)s")
else:
self.params.update({"step_num": abs(step_num) - 1})
conditions.append("steps = %(step_num)s")
else:
raise ValueError("Missing both funnel_step and funnel_custom_steps")
if self._filter.funnel_step_breakdown is not None:
breakdown_prop_value = self._filter.funnel_step_breakdown
if isinstance(breakdown_prop_value, int) and self._filter.breakdown_type != "cohort":
breakdown_prop_value = str(breakdown_prop_value)
self.params.update({"breakdown_prop_value": breakdown_prop_value})
conditions.append("hasAll(arrayFlatten(array(prop)), arrayFlatten(array(%(breakdown_prop_value)s)))")
return " AND ".join(conditions)
def _get_funnel_person_step_events(self):
if self._filter.include_recordings:
step_num = self._filter.funnel_step
if self._filter.include_final_matching_events:
# Always returns the user's final step of the funnel
return ", final_matching_events as matching_events"
elif step_num is None:
raise ValueError("Missing funnel_step filter property")
if step_num >= 0:
# Non drop-off case
self.params.update({"matching_events_step_num": step_num - 1})
else:
# Drop off case if negative number
self.params.update({"matching_events_step_num": abs(step_num) - 2})
return ", step_%(matching_events_step_num)s_matching_events as matching_events"
return ""
def _get_count_columns(self, max_steps: int):
cols: List[str] = []
for i in range(max_steps):
cols.append(f"countIf(steps = {i + 1}) step_{i + 1}")
return ", ".join(cols)
def _get_step_time_names(self, max_steps: int):
names = []
for i in range(1, max_steps):
names.append(f"step_{i}_conversion_time")
formatted = ",".join(names)
return f", {formatted}" if formatted else ""
def _get_final_matching_event(self, max_steps: int):
statement = None
for i in range(max_steps - 1, -1, -1):
if i == max_steps - 1:
statement = f"if(isNull(latest_{i}),step_{i-1}_matching_event,step_{i}_matching_event)"
elif i == 0:
statement = f"if(isNull(latest_0),(null,null,null,null),{statement})"
else:
statement = f"if(isNull(latest_{i}),step_{i-1}_matching_event,{statement})"
return f",{statement} as final_matching_event" if statement else ""
def _get_matching_events(self, max_steps: int):
if self._filter.include_recordings:
events = []
for i in range(0, max_steps):
event_fields = ["latest"] + self.extra_event_fields_and_properties
event_fields_with_step = ", ".join([f'"{field}_{i}"' for field in event_fields])
event_clause = f"({event_fields_with_step}) as step_{i}_matching_event"
events.append(event_clause)
matching_event_select_statements = "," + ", ".join(events)
final_matching_event_statement = self._get_final_matching_event(max_steps)
return matching_event_select_statements + final_matching_event_statement
return ""
def _get_step_time_avgs(self, max_steps: int, inner_query: bool = False):
conditions: List[str] = []
for i in range(1, max_steps):
conditions.append(
f"avg(step_{i}_conversion_time) step_{i}_average_conversion_time_inner"
if inner_query
else f"avg(step_{i}_average_conversion_time_inner) step_{i}_average_conversion_time"
)
formatted = ", ".join(conditions)
return f", {formatted}" if formatted else ""
def _get_step_time_median(self, max_steps: int, inner_query: bool = False):
conditions: List[str] = []
for i in range(1, max_steps):
conditions.append(
f"median(step_{i}_conversion_time) step_{i}_median_conversion_time_inner"
if inner_query
else f"median(step_{i}_median_conversion_time_inner) step_{i}_median_conversion_time"
)
formatted = ", ".join(conditions)
return f", {formatted}" if formatted else ""
def _get_matching_event_arrays(self, max_steps: int):
select_clause = ""
if self._filter.include_recordings:
for i in range(0, max_steps):
select_clause += f", groupArray(10)(step_{i}_matching_event) as step_{i}_matching_events"
select_clause += f", groupArray(10)(final_matching_event) as final_matching_events"
return select_clause
def get_query(self) -> str:
raise NotImplementedError()
def get_step_counts_query(self) -> str:
raise NotImplementedError()
def get_step_counts_without_aggregation_query(self) -> str:
raise NotImplementedError()
def _get_breakdown_select_prop(self) -> str:
if self._filter.breakdown:
self.params.update({"breakdown": self._filter.breakdown})
if self._filter.breakdown_type == "person":
return get_single_or_multi_property_string_expr(
self._filter.breakdown, table="person", query_alias="prop"
)
elif self._filter.breakdown_type == "event":
return get_single_or_multi_property_string_expr(
self._filter.breakdown, table="events", query_alias="prop"
)
elif self._filter.breakdown_type == "cohort":
return "value AS prop"
elif self._filter.breakdown_type == "group":
# :TRICKY: We only support string breakdown for group properties
assert isinstance(self._filter.breakdown, str)
properties_field = f"group_properties_{self._filter.breakdown_group_type_index}"
expression, _ = get_property_string_expr(
table="groups", property_name=self._filter.breakdown, var="%(breakdown)s", column=properties_field
)
return f"{expression} AS prop"
return ""
def _get_cohort_breakdown_join(self) -> str:
cohort_queries, ids, cohort_params = format_breakdown_cohort_join_query(self._team.pk, self._filter)
self.params.update({"breakdown_values": ids})
self.params.update(cohort_params)
return f"""
INNER JOIN (
{cohort_queries}
) cohort_join
ON events.distinct_id = cohort_join.distinct_id
"""
def _get_breakdown_conditions(self) -> Optional[str]:
"""
For people, pagination sets the offset param, which is common across filters
and gives us the wrong breakdown values here, so we override it.
For events, we assume breakdown values remain stable across the funnel,
so using just the first entity to get breakdown values is ok.
if this is a multi property breakdown then the breakdown values are misleading
e.g. [Chrome, Safari], [95, 15] doesn't make clear that Chrome 15 isn't valid but Safari 15 is
so the generated list here must be [[Chrome, 95], [Safari, 15]]
"""
if self._filter.breakdown:
limit = self._filter.breakdown_limit_or_default
first_entity = self._filter.entities[0]
return get_breakdown_prop_values(
self._filter, first_entity, "count(*)", self._team.pk, limit, extra_params={"offset": 0}
)
return None
def _get_breakdown_prop(self, group_remaining=False) -> str:
if self._filter.breakdown:
if group_remaining and self._filter.breakdown_type in ["person", "event"]:
return ", if(has(%(breakdown_values)s, prop), prop, ['Other']) as prop"
elif group_remaining and self._filter.breakdown_type == "group":
return ", if(has(%(breakdown_values)s, prop), prop, 'Other') as prop"
else:
return ", prop"
else:
return ""
|
the-stack_0_8345 | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Dict
from typing import List
from typing import Tuple
import tensorflow as tf
from nncf import NNCFConfig
from nncf.common.graph import NNCFNode
from nncf.common.graph import NNCFGraph
from nncf.common.graph import NNCFNodeName
from nncf.common.graph.transformations.commands import TransformationPriority
from nncf.common.pruning.clusterization import Cluster
from nncf.common.pruning.clusterization import Clusterization
from nncf.common.pruning.mask_propagation import MaskPropagationAlgorithm
from nncf.common.pruning.node_selector import PruningNodeSelector
from nncf.common.pruning.statistics import PrunedLayerSummary
from nncf.common.pruning.structs import PrunedLayerInfoBase
from nncf.common.pruning.utils import is_prunable_depthwise_conv
from nncf.common.pruning.utils import get_output_channels
from nncf.common.utils.logger import logger as nncf_logger
from nncf.common.compression import BaseCompressionAlgorithmController
from nncf.config.extractors import extract_algo_specific_config
from nncf.tensorflow.api.compression import TFCompressionAlgorithmBuilder
from nncf.tensorflow.graph.converter import TFModelConverterFactory
from nncf.tensorflow.graph.metatypes.keras_layers import TFBatchNormalizationLayerMetatype
from nncf.tensorflow.graph.model_transformer import TFModelTransformer
from nncf.tensorflow.graph.transformations.commands import TFLayerWeight
from nncf.tensorflow.graph.transformations.commands import TFInsertionCommand
from nncf.tensorflow.graph.transformations.layout import TFTransformationLayout
from nncf.tensorflow.graph.utils import get_layer_identifier
from nncf.tensorflow.graph.utils import get_nncf_operations
from nncf.tensorflow.tensor import TFNNCFTensor
from nncf.tensorflow.pruning.tensor_processor import TFNNCFPruningTensorProcessor
from nncf.tensorflow.pruning.operations import TFElementwisePruningOp
from nncf.tensorflow.pruning.operations import TFIdentityMaskForwardPruningOp
from nncf.tensorflow.pruning.operations import TF_PRUNING_OPERATOR_METATYPES
from nncf.tensorflow.pruning.utils import get_filter_axis
from nncf.tensorflow.pruning.utils import get_filters_num
from nncf.tensorflow.sparsity.magnitude.operation import BinaryMask
from nncf.tensorflow.sparsity.utils import strip_model_from_masks
class PrunedLayerInfo(PrunedLayerInfoBase):
def __init__(self, node_name: NNCFNodeName, layer_name: str, node_id: int, is_depthwise: bool):
super().__init__(node_name, node_id, is_depthwise)
self.layer_name = layer_name
class BasePruningAlgoBuilder(TFCompressionAlgorithmBuilder):
"""
Determines which modifications should be made to the original model in
order to enable pruning during fine-tuning.
"""
def __init__(self, config: NNCFConfig, should_init: bool = True):
super().__init__(config, should_init)
params = self._algo_config.get('params', {})
self._params = params
self._ignore_frozen_layers = True
self._prune_first = params.get('prune_first_conv', False)
self._prune_batch_norms = params.get('prune_batch_norms', True)
self._prune_downsample_convs = params.get('prune_downsample_convs', False)
self._prunable_types = self._get_op_types_of_pruned_layers()
types_of_grouping_ops = self._get_types_of_grouping_ops()
self._pruning_node_selector = PruningNodeSelector(TF_PRUNING_OPERATOR_METATYPES,
self._prunable_types,
types_of_grouping_ops,
self.ignored_scopes,
self.target_scopes,
self._prune_first,
self._prune_downsample_convs)
self._pruned_layer_groups_info = None
self._graph = None
self._op_names = []
def apply_to(self, model: tf.keras.Model) -> tf.keras.Model:
"""
Adds pruning masks to the model.
:param model: The original uncompressed model.
:return: The model with pruning masks.
"""
transformer = TFModelTransformer(model)
transformation_layout = self.get_transformation_layout(model)
return transformer.transform(transformation_layout)
def get_transformation_layout(self, model: tf.keras.Model) -> TFTransformationLayout:
"""
Computes necessary model transformations (pruning mask insertions) to enable pruning.
:param model: The original uncompressed model.
:return: The instance of the `TransformationLayout` class containing
a list of pruning mask insertions.
"""
converter = TFModelConverterFactory.create(model)
self._graph = converter.convert()
groups_of_nodes_to_prune = self._pruning_node_selector.create_pruning_groups(self._graph)
transformations = TFTransformationLayout()
shared_layers = set()
self._pruned_layer_groups_info = Clusterization[PrunedLayerInfo](lambda x: x.layer_name)
for i, group in enumerate(groups_of_nodes_to_prune.get_all_clusters()):
group_minfos = []
for node in group.elements:
layer_name = get_layer_identifier(node)
layer = model.get_layer(layer_name)
group_minfos.append(PrunedLayerInfo(node.node_name, layer_name, node.node_id,
is_prunable_depthwise_conv(node)))
# Add output_mask to elements to run mask_propagation
# and detect spec_nodes that will be pruned.
# It should be done for all elements of shared layer.
node.data['output_mask'] = TFNNCFTensor(tf.ones(get_output_channels(node)))
if layer_name in shared_layers:
continue
if node.is_shared():
shared_layers.add(layer_name)
# Check that we need to prune weights in this op
assert self._is_pruned_layer(layer)
nncf_logger.info('Adding Weight Pruner in: %s', layer_name)
_, layer_info = converter.get_layer_info_for_node(node.node_name)
for weight_def in node.metatype.weight_definitions:
transformations.register(
self._get_insertion_command_binary_mask(
layer_info.layer_name, weight_def.weight_attr_name)
)
if node.metatype.bias_attr_name is not None and \
getattr(layer, node.metatype.bias_attr_name) is not None:
transformations.register(
self._get_insertion_command_binary_mask(
layer_info.layer_name, node.metatype.bias_attr_name)
)
cluster = Cluster[PrunedLayerInfo](i, group_minfos, [n.node_id for n in group.elements])
self._pruned_layer_groups_info.add_cluster(cluster)
# Propagating masks across the graph to detect spec_nodes that will be pruned
mask_propagator = MaskPropagationAlgorithm(self._graph, TF_PRUNING_OPERATOR_METATYPES,
TFNNCFPruningTensorProcessor)
mask_propagator.mask_propagation()
# Add masks for all spec modules, because prunable batchnorm layers can be determined
# at the moment of mask propagation
types_spec_layers = [TFBatchNormalizationLayerMetatype] \
if self._prune_batch_norms else []
spec_nodes = self._graph.get_nodes_by_metatypes(types_spec_layers)
for spec_node in spec_nodes:
layer_name = get_layer_identifier(spec_node)
layer = model.get_layer(layer_name)
if spec_node.data['output_mask'] is None:
# Skip elements that will not be pruned
continue
if layer_name in shared_layers:
continue
if spec_node.is_shared():
shared_layers.add(layer_name)
nncf_logger.info('Adding Weight Pruner in: %s', layer_name)
_, layer_info = converter.get_layer_info_for_node(spec_node.node_name)
for weight_def in spec_node.metatype.weight_definitions:
if spec_node.metatype is TFBatchNormalizationLayerMetatype \
and not layer.scale and weight_def.weight_attr_name == 'gamma':
nncf_logger.debug('Fused gamma parameter encountered in BatchNormalization layer. '
'Do not add mask to it.')
continue
transformations.register(
self._get_insertion_command_binary_mask(
layer_info.layer_name, weight_def.weight_attr_name)
)
transformations.register(
self._get_insertion_command_binary_mask(
layer_info.layer_name, spec_node.metatype.bias_attr_name)
)
return transformations
def initialize(self, model: tf.keras.Model) -> None:
pass
def _get_insertion_command_binary_mask(self, layer_name: str,
attr_name: str) -> TFInsertionCommand:
op_name = self._get_pruning_operation_name(layer_name, attr_name)
self._op_names.append(op_name)
return TFInsertionCommand(
target_point=TFLayerWeight(layer_name, attr_name),
callable_object=BinaryMask(op_name),
priority=TransformationPriority.PRUNING_PRIORITY
)
@staticmethod
def _get_bn_for_node(node: NNCFNode, bn_nodes: List[NNCFNode]) -> Tuple[bool, List[NNCFNode]]:
is_finished = False
propagating_ops = [op_name for meta_op in [TFIdentityMaskForwardPruningOp, TFElementwisePruningOp]
for op_name in meta_op.get_all_op_aliases()]
if node.node_type == 'BatchNormalization':
is_finished = True
bn_nodes.append(node)
elif node.node_type not in propagating_ops:
is_finished = True
return is_finished, bn_nodes
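# Explanatory note: this callback is intended for NNCFGraph.traverse_graph; it collects
# BatchNormalization nodes and stops the traversal either there or at the first node
# that does not propagate pruning masks.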
def _get_related_batchnorms(self, layer_name: str, group: Cluster, graph: NNCFGraph) -> List[NNCFNode]:
"""
Returns List of batchnorm elements related to the layer.
Note: a single node per layer is returned for shared batchnorm layers.
"""
layer_nodes = [node_ for node_ in group.elements
if node_.layer_name == layer_name]
bn_nodes = []
bn_layer_names = []
for layer_node in layer_nodes:
for next_node in graph.get_next_nodes(layer_node):
for bn_node in graph.traverse_graph(next_node, self._get_bn_for_node):
bn_layer_name = get_layer_identifier(bn_node)
if bn_layer_name not in bn_layer_names:
bn_layer_names.append(bn_layer_name)
bn_nodes.append(bn_node)
return bn_nodes
def _is_pruned_layer(self, layer: tf.keras.layers.Layer) -> bool:
"""
Return whether this layer should be pruned or not.
"""
raise NotImplementedError
def _get_op_types_of_pruned_layers(self) -> List[str]:
"""
Returns list of operation types that should be pruned.
"""
raise NotImplementedError
def _get_types_of_grouping_ops(self) -> List[str]:
raise NotImplementedError
def _get_pruning_operation_name(self, layer_name: str, weight_attr_name: str) -> str:
return f'{layer_name}_{weight_attr_name}_pruning_binary_mask'
class BasePruningAlgoController(BaseCompressionAlgorithmController):
"""
Serves as a handle to the additional modules, parameters and hooks inserted
into the original uncompressed model to enable pruning.
"""
def __init__(self,
target_model: tf.keras.Model,
op_names: List[str],
prunable_types: List[str],
pruned_layer_groups_info: Clusterization[PrunedLayerInfo],
config):
super().__init__(target_model)
self._op_names = op_names
self._prunable_types = prunable_types
self.config = config
self.pruning_config = extract_algo_specific_config(config,
"filter_pruning")
params = self.pruning_config.get('params', {})
self.pruning_init = self.pruning_config.get('pruning_init', 0)
self.pruning_level = self.pruning_init
self._pruned_layer_groups_info = pruned_layer_groups_info
self.prune_flops = False
self._check_pruning_level(params)
self._num_of_sparse_elements_by_node = None
def freeze(self):
raise NotImplementedError
def set_pruning_level(self, pruning_level: float):
raise NotImplementedError
def step(self, next_step):
pass
def _check_pruning_level(self, params):
"""
Check that set only one of pruning target params
"""
pruning_target = params.get('pruning_target', None)
pruning_flops_target = params.get('pruning_flops_target', None)
if pruning_target and pruning_flops_target:
raise ValueError('Only one parameter from \'pruning_target\' and \'pruning_flops_target\' can be set.')
if pruning_flops_target:
self.prune_flops = True
def _calculate_num_of_sparse_elements_by_node(self) -> Dict[NNCFNodeName, int]:
"""Returns the number of sparse elements per node. Take into account names ('^') for the shared ops."""
if self._num_of_sparse_elements_by_node is None:
self._calculate_pruned_layers_summary()
retval = {}
for group in self._pruned_layer_groups_info.get_all_clusters():
for node in group.elements:
retval[node.node_name] = self._num_of_sparse_elements_by_node[node.layer_name]
return retval
def _calculate_pruned_layers_summary(self) -> List[PrunedLayerSummary]:
pruning_levels = []
mask_names = []
weights_shapes = []
mask_shapes = []
self._num_of_sparse_elements_by_node = {}
for wrapped_layer, weight_attr, op_name in get_nncf_operations(self._model, self._op_names):
mask = wrapped_layer.ops_weights[op_name.name]['mask']
mask_names.append(mask.name)
weights_shapes.append(list(mask.shape))
reduce_axes = list(range(len(mask.shape)))
filter_axis = get_filter_axis(wrapped_layer, weight_attr)
if filter_axis == -1:
filter_axis = reduce_axes[filter_axis]
reduce_axes.remove(filter_axis)
filter_mask = tf.reduce_max(tf.cast(mask, tf.int32), axis=reduce_axes, keepdims=True)
mask_shapes.append(list(filter_mask.shape))
filters_number = get_filters_num(wrapped_layer)
pruned_filters_number = filters_number - tf.reduce_sum(filter_mask)
pruning_levels.append(pruned_filters_number / filters_number)
self._num_of_sparse_elements_by_node[wrapped_layer.name] = pruned_filters_number.numpy()
pruning_levels = tf.keras.backend.batch_get_value(pruning_levels)
mask_pruning = list(zip(mask_names, weights_shapes, mask_shapes, pruning_levels))
pruned_layers_summary = []
for mask_name, weights_shape, mask_shape, pruning_level in mask_pruning:
pruned_layers_summary.append(PrunedLayerSummary(mask_name, weights_shape, mask_shape, pruning_level))
return pruned_layers_summary
def strip_model(self, model: tf.keras.Model) -> tf.keras.Model:
return strip_model_from_masks(model, self._op_names)
|
the-stack_0_8347 | # -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet.defer import Deferred
from synapse.util.caches.snapshot_cache import SnapshotCache
from .. import unittest
class SnapshotCacheTestCase(unittest.TestCase):
def setUp(self):
self.cache = SnapshotCache()
self.cache.DURATION_MS = 1
def test_get_set(self):
# Check that getting a missing key returns None
self.assertEquals(self.cache.get(0, "key"), None)
# Check that setting a key with a deferred returns
# a deferred that resolves when the initial deferred does
d = Deferred()
set_result = self.cache.set(0, "key", d)
self.assertIsNotNone(set_result)
self.assertFalse(set_result.called)
# Check that getting the key before the deferred has resolved
# returns a deferred that resolves when the initial deferred does.
get_result_at_10 = self.cache.get(10, "key")
self.assertIsNotNone(get_result_at_10)
self.assertFalse(get_result_at_10.called)
# Check that the returned deferreds resolve when the initial deferred
# does.
d.callback("v")
self.assertTrue(set_result.called)
self.assertTrue(get_result_at_10.called)
# Check that getting the key after the deferred has resolved
# before the cache expires returns a resolved deferred.
get_result_at_11 = self.cache.get(11, "key")
self.assertIsNotNone(get_result_at_11)
if isinstance(get_result_at_11, Deferred):
# The cache may return the actual result rather than a deferred
self.assertTrue(get_result_at_11.called)
# Check that getting the key after the deferred has resolved
# after the cache expires returns None
get_result_at_12 = self.cache.get(12, "key")
self.assertIsNone(get_result_at_12)
|
the-stack_0_8348 | #MenuTitle: Parameter Reporter
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Searches the names of all custom parameters registered in the current app version.
"""
import vanilla
from AppKit import NSPasteboard, NSStringPboardType, NSUserDefaults
appInfo = GSGlyphsInfo.alloc().init()
def setClipboard( myText ):
"""
Sets the contents of the clipboard to myText.
Returns True if successful, False if unsuccessful.
"""
try:
myClipboard = NSPasteboard.generalPasteboard()
myClipboard.declareTypes_owner_( [NSStringPboardType], None )
myClipboard.setString_forType_( myText, NSStringPboardType )
return True
except Exception as e:
print(e)
return False
class ParameterReporter( object ):
fontParameters = appInfo.customFontParameters()
masterParameters = appInfo.customMasterParameters()
instanceParameters = appInfo.customInstanceParameters()
def __init__( self ):
# Window 'self.w':
windowWidth = 250
windowHeight = 200
windowWidthResize = 400 # user can resize width by this value
windowHeightResize = 650 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Parameter Reporter", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.ParameterReporter.mainwindow" # stores last window position and size
)
# UI ELEMENTS:
# Filter:
self.w.filter = vanilla.EditText(
(10, 10, -10, 19 ),
"",
sizeStyle='small',
callback=self.ParameterReporterMain
)
self.w.filter.getNSTextField().setToolTip_("Type one or more search terms here. Use * as wildcard.")
# Listing of Parameters:
self.w.ParameterList = vanilla.List(
(0, 40, -0, -0),
dir(GSLayer),
autohidesScrollers=False,
drawVerticalLines=True,
doubleClickCallback=self.copySelection,
rowHeight=19,
)
self.w.ParameterList.getNSTableView().tableColumns()[0].setWidth_(501)
self.w.ParameterList.getNSTableView().setToolTip_("Double click an entry to copy the respective parameter into the clipboard.")
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Parameter Reporter' could not load preferences. Will resort to defaults.")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
self.ParameterReporterMain(None)
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.ParameterReporter.filter"] = self.w.filter.get()
except:
return False
return True
def LoadPreferences( self ):
try:
Glyphs.registerDefault("com.mekkablue.ParameterReporter.filter", "")
self.w.filter.set( Glyphs.defaults["com.mekkablue.ParameterReporter.filter"] )
except:
return False
return True
def realmStringForParameter(self, parameterName):
realms = []
if parameterName in self.fontParameters:
realms.append("F")
if parameterName in self.masterParameters:
realms.append("M")
if parameterName in self.instanceParameters:
realms.append("I")
return "(%s)" % ",".join(realms)
def copySelection( self, sender ):
try:
indexes = self.w.ParameterList.getSelection()
Parameters = []
for index in indexes:
ParameterName = self.w.ParameterList[index]
parenthesisOffset = ParameterName.rfind("(")
ParameterName = ParameterName[:parenthesisOffset].strip()
clipboardPiece = '{"%s" = "";}' % ParameterName
Parameters.append(clipboardPiece)
# puts Parameters in clipboard:
clipboardString = "(%s)" % ",".join(Parameters)
if not setClipboard(clipboardString):
print("Warning: could not set clipboard.")
# Floating notification:
Glyphs.showNotification(
u"%i parameter%s copied" % (
len(indexes),
"" if len(indexes)==1 else "s",
),
u"You can paste in Font Info.",
)
except Exception as e:
Glyphs.showMacroWindow()
print("Parameter Reporter Error:\nCould not copy to clipboard.\n%s" % e)
def ParameterReporterMain( self, sender ):
try:
filterStringEntry = self.w.filter.get().strip()
filterStrings = filterStringEntry.split(" ")
try:
ParameterList = sorted( set(self.fontParameters + self.instanceParameters + self.masterParameters), key=lambda thisName: thisName.lower() )
for filterString in filterStrings:
if not "*" in filterString:
ParameterList = [ f for f in ParameterList if filterString.lower() in f.lower() ]
elif filterString.startswith("*"):
ParameterList = [ f for f in ParameterList if f.lower().endswith(filterString.lower()[1:]) ]
elif filterString.endswith("*"):
ParameterList = [ f for f in ParameterList if f.lower().startswith(filterString.lower()[:-1]) ]
else:
asteriskPos = filterString.find("*")
beginning = filterString[:asteriskPos].lower()
ending = filterString[asteriskPos+1:].lower()
ParameterList = [ f for f in ParameterList if f.lower().startswith(beginning) and f.lower().endswith(ending) ]
except:
ParameterList = []
if ParameterList:
ParameterList = [
"%s %s" % (item, self.realmStringForParameter(item)) for item in ParameterList
]
self.w.ParameterList.set(ParameterList)
if not self.SavePreferences( self ):
print("Note: 'Parameter Reporter' could not write preferences.")
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Parameter Reporter Error: %s" % e)
ParameterReporter() |
the-stack_0_8349 | import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
import torchvision.models as models
from torch.autograd import Variable
import common
import copy
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
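# Minimal usage sketch (assumed shapes, not part of the original file):
#   se = SELayer(channel=64, reduction=16)
#   out = se(torch.randn(8, 64, 32, 32))  # same shape, channels re-weighted by factors in [0, 1]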
class DilationInception(nn.Module):
def __init__(self, channel, nlayers=3, kernel_size=3, se_reduction=16):
super(DilationInception, self).__init__()
dilations = [i+1 for i in range(nlayers)]
kernel_size_effective = [kernel_size + (kernel_size - 1) * (dilation - 1) for dilation in dilations]
paddings = [(i - 1)//2 for i in kernel_size_effective]
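# Worked example (illustrative): with kernel_size=3, dilation=2 gives an effective kernel
# of 3 + (3-1)*(2-1) = 5 and padding (5-1)//2 = 2, so every dilated branch preserves the
# spatial resolution of its input.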
self.weights = nn.Parameter(0.1*torch.rand(nlayers), requires_grad=True)
self.branches = nn.ModuleList()
for dilation, padding in zip(dilations, paddings):
self.branches.append(nn.Sequential(
nn.Conv2d(channel, channel, kernel_size=kernel_size, dilation=dilation, padding=padding),
))
self.se = SELayer(channel, reduction=se_reduction)
def forward(self, x):
out = x
for w, branch in zip(self.weights, self.branches):
out = out + (w**2)*branch(x)
return self.se(out)
class BottleneckDecoderBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BottleneckDecoderBlock, self).__init__()
inter_planes = out_planes * 4
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.bn2 = nn.BatchNorm2d(in_planes + 32)
self.relu2 = nn.ReLU(inplace=True)
self.bn3 = nn.BatchNorm2d(in_planes + 2*32)
self.relu3 = nn.ReLU(inplace=True)
self.bn4 = nn.BatchNorm2d(in_planes + 3*32)
self.relu4 = nn.ReLU(inplace=True)
self.bn5 = nn.BatchNorm2d(in_planes + 4*32)
self.relu5 = nn.ReLU(inplace=True)
self.bn6 = nn.BatchNorm2d(in_planes + 5*32)
self.relu6= nn.ReLU(inplace=True)
self.bn7 = nn.BatchNorm2d(inter_planes)
self.relu7= nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, 32, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv2 = nn.Conv2d(in_planes + 32, 32, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv3 = nn.Conv2d(in_planes + 2*32, 32, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv4 = nn.Conv2d(in_planes + 3*32, 32, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv5 = nn.Conv2d(in_planes + 4*32, 32, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv6 = nn.Conv2d(in_planes + 5*32, inter_planes, kernel_size=1, stride=1,
padding=0, bias=False)
self.conv7 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out1 = self.conv1(self.relu1(self.bn1(x)))
out1 = torch.cat([x, out1], 1)
out2 = self.conv2(self.relu2(self.bn2(out1)))
out2 = torch.cat([out1, out2], 1)
out3 = self.conv3(self.relu3(self.bn3(out2)))
out3 = torch.cat([out2, out3], 1)
out4 = self.conv4(self.relu4(self.bn4(out3)))
out4 = torch.cat([out3, out4], 1)
out5 = self.conv5(self.relu5(self.bn5(out4)))
out5 = torch.cat([out4, out5], 1)
out6 = self.conv6(self.relu6(self.bn6(out5)))
out = self.conv7(self.relu7(self.bn7(out6)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
#out = self.conv2(self.relu(self.bn2(out)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return torch.cat([x, out], 1)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BottleneckBlock, self).__init__()
inter_planes = out_planes * 4
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(inter_planes)
self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
out = self.conv2(self.relu(self.bn2(out)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return torch.cat([x, out], 1)
class ResidualBlock(nn.Module):
def __init__(self, in_planes, dropRate=0.0):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
x1 = self.relu(self.conv1(x))
x2 = self.conv2(x1)
out = x + x2
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return out
class TransitionBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(TransitionBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return F.interpolate(out, scale_factor=2, mode='bilinear')
class SEDenseDecoder(nn.Module):
def __init__(self):
super(SEDenseDecoder, self).__init__()
############# Block5-up 16-16 ##############
self.se5 = SELayer(384)
self.dense_block5 = BottleneckDecoderBlock(384, 256)
self.trans_block5 = TransitionBlock(640, 128)
self.residual_block51 = ResidualBlock(128)
self.residual_block52 = ResidualBlock(128)
############# Block6-up 32-32 ##############
self.se6 = SELayer(256)
self.dense_block6 = BottleneckDecoderBlock(256, 128)
self.trans_block6 = TransitionBlock(384, 64)
self.residual_block61 = ResidualBlock(64)
self.residual_block62 = ResidualBlock(64)
############# Block7-up 64-64 ##############
self.dense_block7 = BottleneckDecoderBlock(64, 64)
self.trans_block7 = TransitionBlock(128, 32)
self.residual_block71 = ResidualBlock(32)
self.residual_block72 = ResidualBlock(32)
## 128 X 128
############# Block8-up c ##############
self.dense_block8 = BottleneckDecoderBlock(32, 32)
self.trans_block8 = TransitionBlock(64, 16)
self.residual_block81 = ResidualBlock(16)
self.residual_block82 = ResidualBlock(16)
#Amir
# self.se_refin = SELayer(19, 3)
self.se_refin = SELayer(20, 3)
self.conv_refin = nn.Conv2d(20, 20, 3, 1, 1)
self.tanh = nn.Tanh()
self.conv1010 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
self.conv1020 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
self.conv1030 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
self.conv1040 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
self.refine3 = nn.Conv2d(20 + 4, 3, kernel_size=3, stride=1, padding=1)
self.upsample = F.interpolate
self.relu = nn.ReLU(inplace=True)
def forward(self, x, x1, x2, x4,depth, opt):
x42 = torch.cat([x4, x2], 1)
## 16 X 16
x5 = self.trans_block5(self.dense_block5(self.se5(x42)))
x5 = self.residual_block51(x5)
x5 = self.residual_block52(x5)
x52 = torch.cat([x5, x1], 1)
## 32 X 32
x6 = self.trans_block6(self.dense_block6(self.se6(x52)))
x6 = self.residual_block61(x6)
x6 = self.residual_block62(x6)
## 64 X 64
x7 = self.trans_block7(self.dense_block7(x6))
x7 = self.residual_block71(x7)
x7 = self.residual_block72(x7)
## 128 X 128
x8 = self.trans_block8(self.dense_block8(x7))
x8 = self.residual_block81(x8)
x8 = self.residual_block82(x8)
x8 = torch.cat([x8, x,depth], 1)
x9 = self.relu(self.conv_refin(self.se_refin(x8)))
shape_out = x9.data.size()
shape_out = shape_out[2:4]
x101 = F.avg_pool2d(x9, 32)
x102 = F.avg_pool2d(x9, 16)
x103 = F.avg_pool2d(x9, 8)
x104 = F.avg_pool2d(x9, 4)
x1010 = self.upsample(self.relu(self.conv1010(x101)), size=shape_out, mode='bilinear')
x1020 = self.upsample(self.relu(self.conv1020(x102)), size=shape_out, mode='bilinear')
x1030 = self.upsample(self.relu(self.conv1030(x103)), size=shape_out, mode='bilinear')
x1040 = self.upsample(self.relu(self.conv1040(x104)), size=shape_out, mode='bilinear')
dehaze = torch.cat((x1010, x1020, x1030, x1040, x9), 1)
dehaze = self.refine3(dehaze)
return dehaze
class DenseDecoderDilationInception(nn.Module):
def __init__(self):
super(DenseDecoderDilationInception, self).__init__()
############# Block5-up 16-16 ##############
self.di5 = DilationInception(384)
self.dense_block5 = BottleneckDecoderBlock(384, 256)
self.trans_block5 = TransitionBlock(640, 128)
self.residual_block51 = ResidualBlock(128)
self.residual_block52 = ResidualBlock(128)
############# Block6-up 32-32 ##############
self.di6 = DilationInception(256)
self.dense_block6 = BottleneckDecoderBlock(256, 128)
self.trans_block6 = TransitionBlock(384, 64)
self.residual_block61 = ResidualBlock(64)
self.residual_block62 = ResidualBlock(64)
############# Block7-up 64-64 ##############
self.dense_block7 = BottleneckDecoderBlock(64, 64)
self.trans_block7 = TransitionBlock(128, 32)
self.residual_block71 = ResidualBlock(32)
self.residual_block72 = ResidualBlock(32)
## 128 X 128
############# Block8-up c ##############
self.dense_block8 = BottleneckDecoderBlock(32, 32)
self.trans_block8 = TransitionBlock(64, 16)
self.residual_block81 = ResidualBlock(16)
self.residual_block82 = ResidualBlock(16)
self.di9 = DilationInception(20, se_reduction=3)
self.conv_refin = nn.Conv2d(20, 20, 3, 1, 1)
self.tanh = nn.Tanh()
self.conv1010 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
self.conv1020 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
self.conv1030 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
self.conv1040 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
self.refine3 = nn.Conv2d(20 + 4, 3, kernel_size=3, stride=1, padding=1)
self.upsample = F.interpolate
self.relu = nn.ReLU(inplace=True)
def forward(self, x, x1, x2, x4,depth, opt):
x42 = self.di5(torch.cat([x4, x2], 1))
## 16 X 16
x5 = self.trans_block5(self.dense_block5(x42))
x5 = self.residual_block51(x5)
x5 = self.residual_block52(x5)
x52 = self.di6(torch.cat([x5, x1], 1))
## 32 X 32
x6 = self.trans_block6(self.dense_block6(x52))
x6 = self.residual_block61(x6)
x6 = self.residual_block62(x6)
## 64 X 64
x7 = self.trans_block7(self.dense_block7(x6))
x7 = self.residual_block71(x7)
x7 = self.residual_block72(x7)
## 128 X 128
x8 = self.trans_block8(self.dense_block8(x7))
x8 = self.residual_block81(x8)
x8 = self.residual_block82(x8)
# x8 = torch.cat([x8, x], 1)
x8 = torch.cat([x8, x,depth], 1)
x9 = self.relu(self.conv_refin(self.di9(x8)))
shape_out = x9.data.size()
shape_out = shape_out[2:4]
x101 = F.avg_pool2d(x9, 32)
x102 = F.avg_pool2d(x9, 16)
x103 = F.avg_pool2d(x9, 8)
x104 = F.avg_pool2d(x9, 4)
x1010 = self.upsample(self.relu(self.conv1010(x101)), size=shape_out, mode='bilinear')
x1020 = self.upsample(self.relu(self.conv1020(x102)), size=shape_out, mode='bilinear')
x1030 = self.upsample(self.relu(self.conv1030(x103)), size=shape_out, mode='bilinear')
x1040 = self.upsample(self.relu(self.conv1040(x104)), size=shape_out, mode='bilinear')
dehaze = torch.cat((x1010, x1020, x1030, x1040, x9), 1)
dehaze = self.refine3(dehaze)
return dehaze
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
############# 256-256 ##############
with torch.no_grad():
haze_class = models.densenet121(pretrained=True)
# haze_class.features.conv0.r(False)
layer=haze_class.features.conv0.weight.clone().detach()
haze_class.features.conv0=nn.Conv2d(4,64,kernel_size=(7,7),stride=(2,2),padding=(3,3),bias=False)
haze_class.features.conv0.weight[:,0:3,:,:].copy_(layer)
haze_class.features.conv0.weight[:,3,:,:].copy_(layer[:,0,:,:]*0.0721+layer[:,1,:,:]*0.7154+layer[:,2,:,:]*0.2125)
# self.conv=haze_class.features.conv0
# self.conv0=nn.Conv2d(4,64,kernel_size=(7,7),stride=(2,2),padding=(3,3),bias=False)
# self.conv0.weight[:, 0:3, :, :]=copy.deepcopy(self.conv.weight)
# self.conv0.weight[:, 3, :, :]=copy.deepcopy(self.conv.weight[:,0,:,:]*0.0721+self.conv.weight[:,1,:,:]*0.7154+self.conv.weight[:,2,:,:]*0.2125)
self.conv0 = haze_class.features.conv0
self.norm0 = haze_class.features.norm0
self.relu0 = haze_class.features.relu0
self.pool0 = haze_class.features.pool0
############# Block1-down 64-64 ##############
self.dense_block1 = haze_class.features.denseblock1
self.trans_block1 = haze_class.features.transition1
############# Block2-down 32-32 ##############
self.dense_block2 = haze_class.features.denseblock2
self.trans_block2 = haze_class.features.transition2
############# Block3-down 16-16 ##############
self.dense_block3 = haze_class.features.denseblock3
self.trans_block3 = haze_class.features.transition3
############# Block4-up 8-8 ##############
self.Jdense_block4 = BottleneckDecoderBlock(512, 256)#512
self.Jtrans_block4 = TransitionBlock(768, 128)#768
self.Jresidual_block41 = ResidualBlock(128)
self.Jresidual_block42 = ResidualBlock(128)
self.ATdense_block4 = BottleneckDecoderBlock(512, 256)#512
self.ATtrans_block4 = TransitionBlock(768, 128)#768
self.ATresidual_block41 = ResidualBlock(128)
self.ATresidual_block42 = ResidualBlock(128)
self.Wdense_block4 = BottleneckDecoderBlock(512, 256)#512
self.Wtrans_block4 = TransitionBlock(768, 128)#768
self.Wresidual_block41 = ResidualBlock(128)
self.Wresidual_block42 = ResidualBlock(128)
self.decoder_A = SEDenseDecoder()
self.decoder_T = SEDenseDecoder()
self.decoder_J = DenseDecoderDilationInception()
#self.decoder_J=SEDenseDecoder()
self.decoder_w = SEDenseDecoder()
self.sigA=nn.Sigmoid()
self.convT1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
self.ResT = ResidualBlock(32)
self.convT = nn.Conv2d(32, 1, kernel_size=3, stride=1, padding=1)
self.sigT = nn.Sigmoid()
self.convw1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
self.Resw = ResidualBlock(32)
self.convw = nn.Conv2d(32, 1, kernel_size=3, stride=1, padding=1)
self.sigw = nn.Sigmoid()
# self.refine1 = nn.Conv2d(3, 20, kernel_size=3, stride=1, padding=1)
# self.bn_refine1 = nn.BatchNorm2d(20)
# self.refine2 = nn.Conv2d(20, 20, kernel_size=3, stride=1, padding=1)
# self.bn_refine2 = nn.BatchNorm2d(20)
# self.refine3 = nn.Conv2d(20 + 4, 3, kernel_size=3, stride=1, padding=1)
# self.threshold = nn.Threshold(0.1, 0.1)
# self.conv1010 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
# self.bn_conv1010 = nn.BatchNorm2d(1)
# self.conv1020 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
# self.bn_conv1020 = nn.BatchNorm2d(1)
# self.conv1030 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
# self.bn_conv1030 = nn.BatchNorm2d(1)
# self.conv1040 = nn.Conv2d(20, 1, kernel_size=1, stride=1, padding=0) # 1mm
# self.bn_conv1040 = nn.BatchNorm2d(1)
# self.upsample = F.interpolate
# self.relu = nn.ReLU(inplace=True)
self.JGen = common.JGenerate()
def split_params(self):
pretrained_params = []
rest_params = []
for name, module in self.named_children():
if (name == "conv0") or (name == "norm0") or (name == "relu0") or (name == "pool0") or \
(name == "dense_block1") or (name == "dense_block2") or (name == "dense_block3") or \
(name == "trans_block1") or (name == "trans_block2") or (name == "trans_block3"):
for p in module.parameters():
pretrained_params.append(p)
else:
for p in module.parameters():
rest_params.append(p)
return pretrained_params, rest_params
def set_parameters(self, models, value):
dicts = {
'encoder': [self.conv0, self.norm0, self.relu0, self.pool0, self.dense_block1, self.dense_block2, self.dense_block3, self.trans_block1, self.trans_block2, self.trans_block3],
'J_AT': [self.decoder_A, self.decoder_T, self.convT, self.convT1, self.sigT, self.ResT],
'J_direct': [self.decoder_J],
'w': [self.decoder_w, self.convw1, self.convw, self.sigw, self.Resw]
}
if not isinstance(models, list):
models = [models]
for model in models:
# print(model)
for block in dicts[model]:
# print(block.__class__.__name__)
for module in block.modules():
for p in module.parameters():
p.requires_grad=value
def freeze(self, models):
print('Freezing the following:')
print(models)
self.set_parameters(models, False)
def unfreeze(self, models):
print('Unfreezing the following:')
print(models)
self.set_parameters(models, True)
def forward(self, x,depth, opt):
## 256x256
# print("input {}".format((x!=x).any()))
x0 = self.pool0(self.relu0(self.norm0(self.conv0(torch.cat([x,depth],1)))))
# print("x0 {}".format((x0!=x0).any()))
## 64 X 64
x1 = self.dense_block1(x0)
# print("x1 {}".format((x1!=x1).any()))
# print x1.size()
x1 = self.trans_block1(x1)
# print("x1 {}".format((x1!=x1).any()))
### 32x32
x2 = self.trans_block2(self.dense_block2(x1))
# print x2.size()
# print("x2 {}".format((x2!=x2).any()))
### 16 X 16
x3 = self.trans_block3(self.dense_block3(x2))
# print("x3 {}".format((x3!=x3).any()))
# x3=Variable(x3.data,requires_grad=True)
## 8 X 8
x4J = self.Jtrans_block4(self.Jdense_block4(x3))
#Amir
x4AT = self.ATtrans_block4(self.ATdense_block4(x3))
x4W = self.Wtrans_block4(self.Wdense_block4(x3))
x4J = self.Jresidual_block41(x4J)
x4J = self.Jresidual_block42(x4J)
#Amir
x4AT = self.ATresidual_block41(x4AT)
x4AT = self.ATresidual_block42(x4AT)
x4W = self.Wresidual_block41(x4W)
x4W = self.Wresidual_block42(x4W)
######################################
#Amir
# A = self.decoder_A(x, x1, x2, x4AT,depth, opt)
A = self.decoder_A(x, x1, x2, x4AT, depth, opt)
T = self.decoder_T(x, x1, x2, x4AT,depth, opt)
T=self.sigT(T)
# T = self.sigT(self.convT(self.ResT(self.convT1(T))))
# T = T.repeat(1, 3, 1, 1)
# J_AT = self.JGen(A=A, t=T, I=x)
J_AT=torch.mul(A,T)
J_direct = self.decoder_J(x, x1, x2, x4J,depth, opt)
w = self.decoder_w(x, x1, x2, x4W,depth, opt)
w = self.sigw(self.convw(self.Resw(self.convw1(w))))
w = w.repeat(1, 3, 1, 1)
J_total = w*J_direct + (1-w)*J_AT
return J_total,J_direct,J_AT,A,T,w
|
the-stack_0_8350 | from discord.ext.commands import context
from ..utils import RedisDict
__all__ = ("Context",)
class Context(context.Context):
def __init__(self, **kwargs):
super(Context, self).__init__(**kwargs)
self._storage = None
@property
def storage(self):
if self._storage is None:
self._storage = RedisDict(
self.bot.redis,
prefix=f'storage:{self.cog.qualified_name if self.cog else "cog"}:{self.guild.id if self.guild else self.channel.id}',
)
return self._storage
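# Minimal usage sketch (hypothetical command; assumes RedisDict supports item assignment):
#   @commands.command()
#   async def remember(self, ctx: Context, key: str, value: str):
#       ctx.storage[key] = value  # persisted in Redis under a per-cog, per-guild prefix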
|
the-stack_0_8351 | # This class loads the data
# 1. Skeleton (.skel or .mat)
# 2. Video (a folder with frames XXXXXX_[index].png, .jpg, or .jpeg;
#    if you have an actual video you can use ffmpeg to split it into frames)
# 3. Choreography (.svl)
# 4. Music beats (.txt)
import os
import sys
import DanceAnno_Application
__author__ = 'DIMITRIOS'
from tkinter import *
from tkinter import ttk # ttk is a little more beautiful than tk.
from tkinter.filedialog import askopenfilename, askdirectory
from tkinter.messagebox import showerror
from tkinter import messagebox
sys.path.append( os.path.join('.', 'Utils' ))
import readsvl # function to read svls
import readtxt # function to read txts
import readskel # function to read body skeleton trajectories
# if PyCharm underlines them with red, just ignore (alt+enter -> ignore)
class Loader:
def __init__(self):
# This variable will automatic drive the folders selection and it will shorten your clicks
self.debug_FLAG = False
self.db = 'salsa' # salsa or calus
self.debug_fastaccess = 'bertrand_c3_t1'
# Are the data loaded ?
self.skeletonLoadedFlag = False
self.videoLoadedFlag = False
self.choreoLoadedFlag = False
# GUI init
self.root = Tk()
self.root.configure(background='#000')
self.root.title("Dance Annotator")
# ask for permission to close window
self.root.protocol("WM_DELETE_WINDOW", self.close_window)
# Window initial dimensions
w = 900 # The value of the width
h = 300 # The value of the height of the window
# Your screen width and height
ws = self.root.winfo_screenwidth()
hs = self.root.winfo_screenheight()
# Top left corner of the window
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
self.root.geometry('%dx%d+%d+%d' % (w, h, x, y))
# Data
# Sampling rate of Kinect
self.Fs = 0
# Length of the Kinect signals
self.length_signal_samples = 0
# Number of music beats (optional)
self.nBeats = 0
# Store the index of Video frames (XXXXXXX_[index].jpg)
self.indexFrames = []
self.dname = "" # directory where video frames are located
self.prefixname = "" # the part before underscore of XXXXX_[index].jpg
self.annotationSecs = [] # First level annotation
self.labels = {}
self.annotationSecsB = [] # Second level annotation
self.labelsB = {}
self.beats = {} # Beats indicators
# Vars to indicate the parsing status of each file
self.skeletonStatusSTR = StringVar()
self.skeletonStatusSTR.set("Empty")
self.videoStatusSTR = StringVar()
self.videoStatusSTR.set("Empty")
self.choreoStatusSTR = StringVar()
self.choreoStatusSTR.set("Empty")
self.mbeatsStatusSTR = StringVar()
self.mbeatsStatusSTR.set("Empty")
# Start the GUI design
# Coloring style for ttk
style = ttk.Style()
style.configure("BW.TFrame", foreground="black", background="white")
style.configure("BW.TLabel", foreground="black", background="white")
style.configure("BW.TCheckbutton", foreground="black", background="white")
# Frame containing the loading functionalities
self.fr_filedialog = ttk.Frame(self.root, style="BW.TFrame")
# Frame containing the GUI navigation processes (Continue or Exit)
self.fr_exitcontinue = ttk.Frame(self.root, style="BW.TFrame")
# Just some text to explain what we are doing
self.lbl_explain = ttk.Label(self.fr_filedialog, text="Select the resources to annotate", style="BW.TLabel")
# --- FILE SELECTION WIDGETS ----
# 1 SKELETON
self.lbl_namelbl_mat_skeleton = ttk.Label(self.fr_filedialog, text="Skeleton Data", style="BW.TLabel")
self.entry_name_mat = Entry(self.fr_filedialog)
self.bt_mat_load = Button(self.fr_filedialog, text="...", command=self.loadSkeletonData)
self.lbl_namelbl_hint_skeleton = ttk.Label(self.fr_filedialog, text=".mat or .skel", style="BW.TLabel")
self.lbl_namelbl_status_skeleton = ttk.Label(self.fr_filedialog, textvariable=self.skeletonStatusSTR, style="BW.TLabel")
#self.separatorBtSkel = ttk.Separator(self.fr_filedialog,orient=VERTICAL)
# 2 VIDEO FRAMES
self.lbl_namelbl_frames_video = ttk.Label(self.fr_filedialog, text="Folder with frame data", style="BW.TLabel")
self.entry_name_frames = Entry(self.fr_filedialog)
self.bt_frames = Button(self.fr_filedialog, text="...", command= self.loadFramesByDirectory)
self.lbl_namelbl_hint_video = ttk.Label(self.fr_filedialog, text="A folder with jpeg, jpg, or png files", style="BW.TLabel")
self.lbl_namelbl_status_video = ttk.Label(self.fr_filedialog, textvariable=self.videoStatusSTR, style="BW.TLabel")
#self.separatorFramesVideo = ttk.Separator(self.fr_filedialog,orient=VERTICAL)
# 3 CHOREOGRAPHY
self.lbl_load_choreo = ttk.Label(self.fr_filedialog, text="Load existing choreography (Optional)", style="BW.TLabel")
self.entry_name_choreo = Entry(self.fr_filedialog)
self.bt_load_ch = Button(self.fr_filedialog, text="...", command= self.loadChoreography)
self.lbl_namelbl_hint_choreo = ttk.Label(self.fr_filedialog, text="Provide an existing .txt otherwise a new one will be created", style="BW.TLabel" )
self.lbl_namelbl_status_choreo = ttk.Label(self.fr_filedialog, textvariable=self.choreoStatusSTR, style="BW.TLabel")
# 4 Music beats
self.lbl_load_mbeats = ttk.Label(self.fr_filedialog, text="Load music beats (Optional)", style="BW.TLabel")
self.entry_name_mbeats = Entry(self.fr_filedialog)
self.bt_load_mbeats = Button(self.fr_filedialog, text="...", command= self.loadMusicBeats)
self.lbl_namelbl_hint_mbeats = ttk.Label(self.fr_filedialog, text="Music beats in .txt format", style="BW.TLabel")
self.lbl_namelbl_status_mbeats = ttk.Label(self.fr_filedialog, textvariable=self.mbeatsStatusSTR, style="BW.TLabel")
self.bt_continue = Button(self.fr_exitcontinue, text="Continue", command=self.StartAnno, state = DISABLED)
self.bt_exit = Button(self.fr_exitcontinue, text="Exit", command=self.close_window)
# --- PLACEMENT OF WIDGETs IN THE ROOT WINDOW -------
self.fr_filedialog.grid(row=0, column=0, columnspan=4, sticky=(N, S, E, W), padx=5)
self.fr_exitcontinue.grid(row=1, column=0, columnspan=4, sticky=(E), ipadx=50, padx=5)
# Explanation
self.lbl_explain.grid(row=0, column=0, columnspan=4, rowspan=1, sticky=(E,W), padx=5)
# Labels
self.lbl_namelbl_mat_skeleton.grid(column=0, sticky=(W), row=1, columnspan=1, rowspan=1, pady=5, padx=5)
self.entry_name_mat.grid(column=1, sticky=(N, S, E, W), row=1, columnspan=1, rowspan=1, pady=5, padx=5)
self.bt_mat_load.grid(column=2, sticky=(N, S, E, W), row=1, columnspan=1, rowspan=1, pady=5, padx=5)
self.lbl_namelbl_hint_skeleton.grid(column=3, sticky=(W), row=1, columnspan=1, rowspan=1, padx=5)
self.lbl_namelbl_status_skeleton.grid(column=4, sticky=(W), row=1, columnspan=1, rowspan=1, padx=5)
#self.separatorBtSkel.pack(side="left", fill=Y, padx=5)
self.lbl_namelbl_frames_video.grid(row=2, column=0, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.entry_name_frames.grid(row=2, column=1, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.bt_frames.grid(row=2, column=2, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.lbl_namelbl_hint_video.grid(row=2, column=3, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.lbl_namelbl_status_video.grid(row=2, column=4, columnspan=1, rowspan=1, sticky=(W), padx=5)
#self.separatorFramesVideo.pack(side="left", fill=Y, padx=5)
self.lbl_load_choreo.grid(row=3, column=0, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.entry_name_choreo.grid(row=3, column=1, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.bt_load_ch.grid(row=3, column=2, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.lbl_namelbl_hint_choreo.grid(row=3, column=3, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.lbl_namelbl_status_choreo.grid(row=3, column=4, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.lbl_load_mbeats.grid(row=4, column=0, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.entry_name_mbeats.grid(row=4, column=1, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.bt_load_mbeats.grid(row=4, column=2, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.lbl_namelbl_hint_mbeats.grid(row=4, column=3, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.lbl_namelbl_status_mbeats.grid(row=4, column=4, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.bt_exit.grid(row = 0, column = 3, sticky = (E), pady = 5, padx = 15, ipadx=25)
self.bt_continue.grid(row = 0, column = 4, sticky = (W), pady = 5, padx = 15, ipadx = 15)
ttk.Sizegrip().grid(row=6, column=3, sticky=(E))
#--------------------
self.root.columnconfigure(0, weight=1)
self.root.rowconfigure(0, weight=1)
self.fr_filedialog.columnconfigure(0, weight=1)
self.fr_filedialog.columnconfigure(1, weight=1)
self.fr_filedialog.columnconfigure(2, weight=1, minsize=30)
self.fr_filedialog.columnconfigure(3, weight=1)
self.fr_filedialog.columnconfigure(4, weight=1, minsize=100)
# for i in range(4):
# self.fr_filedialog.rowconfigure(i, weight=1)
self.root.resizable(True, True)
# If in debugging mode then load automatically the files
if self.debug_FLAG:
self.loadSkeletonData()
self.loadFramesByDirectory()
self.loadChoreography()
self.loadMusicBeats()
self.root.after(1000, self.StartAnno)
# Ignite GUI
self.root.mainloop()
return
# --- SKELETON DATA -------
def loadSkeletonData(self):
if self.debug_FLAG:
if self.db == 'salsa':
fname = 'Data\\Salsa\\performance-trajectories\\' + self.debug_fastaccess + '_kinect_1.mat'
elif self.db == 'calus':
fname = 'Data\\Calus\\rec.skel'
else:
if self.db == 'salsa':
fname = askopenfilename(initialdir='Data\\Salsa\\performance-trajectories',
filetypes=(("mat file", "*.mat"),("skel file", "*.skel"), ("All files", "*.*") ))
elif self.db == 'calus':
fname = askopenfilename(initialdir='Data\\Calus',
filetypes=(("skel file", "*.skel"), ("mat file", "*.mat"), ("All files", "*.*") )) #performance-trajectories
if fname:
try:
self.entry_name_mat.insert(0, "..." + fname[-30:])
dummy, fextension = os.path.splitext(fname)
# ------- load skeleton trajectories -----------------------
if fextension=='.mat':
self.signals_wrapper, self.Fs = readskel.readmatlab_wrapper(fname)
else: # .skel
self.signals_wrapper, self.Fs = readskel.skelparser(fname)
nJoints = len(self.signals_wrapper)
sigA = next(iter(self.signals_wrapper.values()))
nTrajects = len(sigA[0])
self.skeletonStatusSTR.set(str(nTrajects) + " trajects")
self.skeletonLoadedFlag = True
self.checkContinueEnable()
# global Fs, length_signal_samples
self.length_signal_samples = nTrajects
# put a separation line
separatorBtsA = ttk.Separator(self.fr_filedialog, orient=HORIZONTAL)
separatorBtsA.grid(row=5, column=0, columnspan=5, sticky="WE")
# show available joints
self.signalsSelected = {}
self.chb_joint = {}
i = 0
for key,v in sorted(self.signals_wrapper.items()):
self.signalsSelected[key] = IntVar()
if key in ('Left foot', 'Right foot'):
self.signalsSelected[key].set(1)
self.chb_joint[key] = ttk.Checkbutton(self.fr_filedialog, text = key, variable = self.signalsSelected[key], style="BW.TCheckbutton")
self.chb_joint[key].grid(row=6 + i % 10, column=1+i//10, columnspan=1, rowspan=1, sticky=(W))
i += 1
#make my screen dimensions work
w = 900 #The value of the width
h = 300 + 12*22 #The value of the height of the window
ws = self.root.winfo_screenwidth()#This value is the width of the screen
hs = self.root.winfo_screenheight()#This is the height of the screen
# calculate position x, y
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
self.root.geometry('%dx%d+%d+%d' % (w, h, x, y))
#self.root.mainloop()
            except Exception as e:
self.skeletonLoadedFlag = False
self.checkContinueEnable()
print(e)
showerror("Open Source File", "Failed to read file\n'%s'\n'%s'" % (fname, e))
return
return
#=========== Load directory of frames ======================================
def loadFramesByDirectory(self):
if self.debug_FLAG:
if self.db == 'salsa':
self.dname = "Data\\Salsa\\Videos\\" + self.debug_fastaccess + "_kinect_1"
elif self.db == 'calus':
self.dname = "Data\\Calus\\frames"
else:
if self.db == 'salsa':
self.dname = askdirectory(initialdir='Data\\Salsa\\Videos')
elif self.db == 'calus':
self.dname = askdirectory(initialdir='Data\\Calus')
if self.dname:
try:
self.entry_name_frames.insert(0,"..." + self.dname[-30:])
self.indexFrames = []
for file in os.listdir(self.dname):
dum, self.checkvideof_ext = os.path.splitext(file)
                    if self.checkvideof_ext in ('.jpg', '.jpeg', '.png', '.bmp', '.JPG', '.JPEG', '.PNG', '.BMP'):
dum, self.videof_ext = os.path.splitext(file)
k = file.rfind("_")
l = file.rfind(".")
iFrame = file[k+1:l]
if iFrame[0] == 'f':
iFrame = iFrame[1:]
self.indexFrames.append(int(iFrame))
self.prefixname = file[:k+2]
else:
self.indexFrames.append(int(iFrame))
self.prefixname = file[:k+1]
self.indexFrames = sorted(self.indexFrames)
self.videoStatusSTR.set( str(len(self.indexFrames)) + " Frames" )
self.videoLoadedFlag = True
                    elif file == 'Thumbs.db':
continue
else:
showerror("Fail", "Only jpeg, jpg, JPG, bmp, BMP, png, PNG frames are supported")
self.videoLoadedFlag = False
return
self.checkContinueEnable()
            except Exception as e:
self.videoLoadedFlag = False
self.checkContinueEnable()
showerror("Error", ("Open Source File\n'%s'" % e) + "\n" + ("Failed to open directory\n'%s'" % self.dname))
return
return
# =========== LOAD SVL CHOREOGRAPHY ===============================
def loadChoreography(self):
if self.debug_FLAG:
if self.db == 'salsa':
tempf =self.debug_fastaccess
tempf = list(tempf)
tempf[0] = tempf[0].upper()
tempf = ''.join(tempf)
fname = "Data\\Salsa\\SVL\\" + tempf + "_DanceAnnotationTool.svl"
elif self.db == 'calus':
fname = "Data\\Calus\\DanceAnnotationTool.txt"
else:
if self.db == 'salsa':
fname = askopenfilename(initialdir='Data\\Salsa\\SVL', filetypes=(("svl file", "*.svl"), ("txt file", "*.txt"), ("All files", "*.*") ))
elif self.db == 'calus':
fname = askopenfilename(initialdir='Data\\Calus', filetypes=(("txt file", "*.txt"), ("svl file", "*.svl"), ("All files", "*.*") ))
dummy, fextension = os.path.splitext(fname)
if fname:
try:
if fextension == '.svl':
params, self.annotationSecs, self.labels = readsvl.extractSvlAnnotRegionFile(fname)
self.entry_name_choreo.insert(0,"..." + fname[-30:])
self.choreoStatusSTR.set(str(len(self.labels)) + " labels")
self.choreoLoadedFlag = True
self.checkContinueEnable()
elif fextension == '.txt':
self.annotationSecs, self.labels, self.annotationSecsB, self.labelsB = readtxt.parse(fname)
self.entry_name_choreo.insert(0,"..." + fname[-30:])
self.choreoStatusSTR.set(str(len(self.labels)) + " labels")
self.choreoLoadedFlag = True
self.checkContinueEnable()
else:
showerror("Waring", "Parser does not exists for such a file, only svl or txt are supported")
except Exception as e:
self.choreoLoadedFlag = False
self.checkContinueEnable()
msg = "There was a problem in loading!\n'%s'" % e
if messagebox.askyesno("Error", msg + "\n" + "Do you want to choose another file?"):
self.loadChoreography()
else:
return
return
#=================== Music beats ========================================
def loadMusicBeats(self):
if self.debug_FLAG:
if self.db=='salsa':
fname = 'Data\\Salsa\\MusicBeats\\' + self.debug_fastaccess + '_feetcam-beats.txt'
else:
fname = None
else:
fname = askopenfilename(initialdir='Data\\Salsa\\MusicBeats',
filetypes=(("beats file", "*.txt"), ("All files", "*.*") )) #performance-trajectories
if fname:
try:
self.entry_name_mbeats.insert(0, "..." + fname[-30:])
dummy, fextension = os.path.splitext(fname)
# ------- load skeleton trajectories -----------------------
if fextension=='.txt':
self.beats = readtxt.parse_mbeats(fname)
else:
showerror("Error","Only txt file extension is supported")
return
self.nBeats = len(self.beats)
self.mbeatsStatusSTR.set(str(self.nBeats) + " Beats")
            except Exception as e:
showerror("Open Source File", "Failed to read file\n'%s'\n'%s'" % (fname, e))
return
return
def close_window(self):
#if messagebox.askokcancel("Exit", "Are you sure?"):
self.root.destroy()
def StartAnno(self):
self.root.destroy()
DanceAnno_Application.Application.StartAnnotating(self)
def checkContinueEnable(self):
if self.skeletonLoadedFlag and self.videoLoadedFlag: # and self.choreoLoadedFlag:
self.bt_continue.config(state = NORMAL) |
the-stack_0_8352 | # coding=utf-8
# Copyright 2021 The Fairseq Authors, Microsoft Research, and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" WavLM model configuration"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/wavlm-base-960h": "https://huggingface.co/facebook/wavlm-base-960h/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`WavLMModel`]. It is used to instantiate a WavLM
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the WavLM
[facebook/wavlm-base-960h](https://huggingface.co/facebook/wavlm-base-960h) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the WavLM model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`WavLMModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
final_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the final projection layer of [`WavLMForCTC`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
feat_extract_norm (`str`, *optional*, defaults to `"group"`):
The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
convolutional layers.
feat_proj_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the 1D convolutional layers of the feature
extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for quantized feature encoder states.
conv_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`Tuple[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
conv_bias (`bool`, *optional*, defaults to `False`):
Whether the 1D convolutional layers have a bias.
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer.
do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
False` corresponds to applying layer norm after the attention layer.
apply_spec_augment (`bool`, *optional*, defaults to `True`):
Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
[SpecAugment: A Simple Data Augmentation Method for Automatic Speech
Recognition](https://arxiv.org/abs/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Probability of each feature vector along the time axis to be chosen as the start of the vector span to be
            masked. Approximately `mask_time_prob * sequence_length // mask_time_length` feature vectors will be masked
            along the time axis. This is only relevant if `apply_spec_augment is True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, irrespective of
            `mask_time_prob`. Only relevant if `mask_time_prob * len(time_axis) / mask_time_length <
            mask_time_min_masks`.
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Probability of each feature vector along the feature axis to be chosen as the start of the vector span to
            be masked. Approximately `mask_feature_prob * hidden_size // mask_feature_length` feature vectors will be
            masked along the feature axis. This is only relevant if `apply_spec_augment is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
num_codevectors_per_group (`int`, *optional*, defaults to 320):
Number of entries in each quantization codebook (group).
num_codevector_groups (`int`, *optional*, defaults to 2):
Number of codevector groups for product codevector quantization.
contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
The temperature *kappa* in the contrastive loss.
feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the output of the feature encoder that's used by the quantizer.
num_negatives (`int`, *optional*, defaults to 100):
Number of negative samples for the contrastive loss.
codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the quantized feature vectors.
proj_codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the final projection of both the quantized and the transformer features.
        diversity_loss_weight (`float`, *optional*, defaults to 0.1):
The weight of the codebook diversity loss component.
ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`):
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
instance of [`WavLMForCTC`].
ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
of [`WavLMForCTC`].
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
instance of [`WavLMForSequenceClassification`].
classifier_proj_size (`int`, *optional*, defaults to 256):
Dimensionality of the projection before token mean-pooling for classification.
tdnn_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
tdnn_kernel (`Tuple[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
*XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
tdnn_dilation (`Tuple[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
*XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
xvector_output_dim (`int`, *optional*, defaults to 512):
Dimensionality of the *XVector* embedding vectors.
add_adapter (`bool`, *optional*, defaults to `False`):
Whether a convolutional network should be stacked on top of the Wav2Vec2 Encoder. Can be very useful for
warm-starting Wav2Vec2 for SpeechEncoderDecoder models.
adapter_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adapter_stride (`int`, *optional*, defaults to 2):
Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
num_adapter_layers (`int`, *optional*, defaults to 3):
Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
True`.
output_hidden_size (`int`, *optional*):
Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant
if `add_adapter is True`.
Example:
```python
>>> from transformers import WavLMModel, WavLMConfig
>>> # Initializing a WavLM facebook/wavlm-base-960h style configuration
>>> configuration = WavLMConfig()
>>> # Initializing a model from the facebook/wavlm-base-960h style configuration
>>> model = WavLMModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "wavlm"
def __init__(
self,
vocab_size=32,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout=0.1,
activation_dropout=0.1,
attention_dropout=0.1,
feat_proj_dropout=0.0,
feat_quantizer_dropout=0.0,
final_dropout=0.1,
layerdrop=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
feat_extract_norm="group",
feat_extract_activation="gelu",
conv_dim=(512, 512, 512, 512, 512, 512, 512),
conv_stride=(5, 2, 2, 2, 2, 2, 2),
conv_kernel=(10, 3, 3, 3, 3, 2, 2),
conv_bias=False,
num_conv_pos_embeddings=128,
num_conv_pos_embedding_groups=16,
num_buckets=320,
max_bucket_distance=800,
do_stable_layer_norm=False,
apply_spec_augment=True,
mask_time_prob=0.05,
mask_time_length=10,
mask_time_min_masks=2,
mask_feature_prob=0.0,
mask_feature_length=10,
num_codevectors_per_group=320,
num_codevector_groups=2,
contrastive_logits_temperature=0.1,
num_negatives=100,
codevector_dim=256,
proj_codevector_dim=256,
diversity_loss_weight=0.1,
ctc_loss_reduction="mean",
ctc_zero_infinity=False,
use_weighted_layer_sum=False,
classifier_proj_size=256,
tdnn_dim=(512, 512, 512, 512, 1500),
tdnn_kernel=(5, 3, 3, 1, 1),
tdnn_dilation=(1, 2, 3, 1, 1),
xvector_output_dim=512,
num_ctc_classes=80,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
add_adapter=False,
adapter_kernel_size=3,
adapter_stride=2,
num_adapter_layers=3,
output_hidden_size=None,
**kwargs
):
super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim)
self.conv_stride = list(conv_stride)
self.conv_kernel = list(conv_kernel)
self.conv_bias = conv_bias
self.num_buckets = num_buckets
self.max_bucket_distance = max_bucket_distance
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim)
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.num_ctc_classes = num_ctc_classes
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.use_weighted_layer_sum = use_weighted_layer_sum
self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. "
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
)
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
# parameters for pretraining with codevector quantized representations
self.num_codevectors_per_group = num_codevectors_per_group
self.num_codevector_groups = num_codevector_groups
self.contrastive_logits_temperature = contrastive_logits_temperature
self.feat_quantizer_dropout = feat_quantizer_dropout
self.num_negatives = num_negatives
self.codevector_dim = codevector_dim
self.proj_codevector_dim = proj_codevector_dim
self.diversity_loss_weight = diversity_loss_weight
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# adapter
self.add_adapter = add_adapter
self.adapter_kernel_size = adapter_kernel_size
self.adapter_stride = adapter_stride
self.num_adapter_layers = num_adapter_layers
self.output_hidden_size = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
self.tdnn_dim = list(tdnn_dim)
self.tdnn_kernel = list(tdnn_kernel)
self.tdnn_dilation = list(tdnn_dilation)
self.xvector_output_dim = xvector_output_dim
@property
def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul, self.conv_stride, 1)
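        # Worked example (not in the original file): with the default conv_stride
        # (5, 2, 2, 2, 2, 2, 2) this is 5 * 2**6 = 320, i.e. one encoder output
        # frame per 320 raw audio samples.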
|
the-stack_0_8353 | """
molecool
A python package for analyzing and visualzing xyz files.
"""
import sys
from setuptools import setup, find_packages
import versioneer
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except Exception:
long_description = "\n".join(short_description[2:])
setup(
# Self-descriptive entries which should always be present
name='molecool',
author='Chris Handelmann',
author_email='[email protected]',
description=short_description[0],
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD-3-Clause',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_packages(),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
# Additional entries you may want simply uncomment the lines you want and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
|
the-stack_0_8356 | from typing import List
class Solution:
def findOriginalArray(self, changed: List[int]) -> List[int]:
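        # Two-pointer approach: sort ascending, then for each unused value at
        # `left` scan forward with `right` for its unused double; both pointers
        # only move forward, and matched positions are marked in `visited`.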
        changed = sorted(changed)
_len = len(changed)
if _len % 2 != 0:
return []
half = int(_len >> 1)
ans = []
visited = [0] * _len
left = 0
right = 1
while left < _len:
while left < _len and visited[left] == -1:
left += 1
if left >= _len:
break
l_val = changed[left]
dou = l_val * 2
right = max(left + 1, right)
while right < _len and (changed[right] != dou or visited[right] < 0):
right += 1
if right >= _len:
return []
ans.append(l_val)
visited[right] = -1
visited[left] = -1
if len(ans) != half:
return []
return ans
def findOriginalArray2(self, changed: List[int]) -> List[int]:
changed.sort()
_len = len(changed)
if _len % 2 != 0:
return []
half = int(_len >> 1)
count = 0
ans = []
visited = [0] * _len
for i in range(_len):
cur = changed[i]
if visited[i] < 0:
continue
double_val = changed[i] * 2
if double_val not in changed:
return []
for j in range(i + 1, _len):
if visited[j] < 0:
continue
if double_val == changed[j]:
ans.append(cur)
visited[j] = -1
count += 1
break
if count != half:
return []
return ans
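    def findOriginalArrayCounter(self, changed: List[int]) -> List[int]:
        # Alternative sketch (not part of the original solutions above): greedily
        # pair each value with its double using a frequency counter; zeros are
        # handled separately since 2 * 0 == 0.
        from collections import Counter
        if len(changed) % 2 != 0:
            return []
        counts = Counter(changed)
        ans = []
        for val in sorted(counts):
            if counts[val] == 0:
                continue
            if val == 0:
                if counts[0] % 2 != 0:
                    return []
                ans.extend([0] * (counts[0] // 2))
                counts[0] = 0
                continue
            if counts[2 * val] < counts[val]:
                return []
            counts[2 * val] -= counts[val]
            ans.extend([val] * counts[val])
            counts[val] = 0
        return ans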
if __name__ == '__main__':
sol = Solution()
nums = [
# 0, 0, 0, 0
1, 3, 4, 2, 6, 8
# 6, 3, 0, 1
]
    print(sol.findOriginalArray(nums))  # expected output: [1, 3, 4]
|
the-stack_0_8357 | import dynet as dy
import numpy as np
from xnmt.loss import FactoredLossExpr
from xnmt.persistence import serializable_init, Serializable, Ref
from xnmt.vocab import Vocab
from xnmt.constants import INFINITY
import xnmt.evaluator
import xnmt.linear as linear
class LossCalculator(object):
'''
A template class implementing the training strategy and corresponding loss calculation.
'''
def __call__(self, translator, initial_state, src, trg):
raise NotImplementedError()
def remove_eos(self, sequence, eos_sym=Vocab.ES):
try:
idx = sequence.index(Vocab.ES)
sequence = sequence[:idx]
except ValueError:
# NO EOS
pass
return sequence
class MLELoss(Serializable, LossCalculator):
yaml_tag = '!MLELoss'
  # Standard maximum-likelihood loss: per-timestep cross-entropy summed over the target sequence.
@serializable_init
def __init__(self):
pass
def __call__(self, translator, initial_state, src, trg):
dec_state = initial_state
trg_mask = trg.mask if xnmt.batcher.is_batched(trg) else None
losses = []
seq_len = len(trg[0]) if xnmt.batcher.is_batched(src) else len(trg)
if xnmt.batcher.is_batched(src):
for j, single_trg in enumerate(trg):
assert len(single_trg) == seq_len # assert consistent length
assert 1==len([i for i in range(seq_len) if (trg_mask is None or trg_mask.np_arr[j,i]==0) and single_trg[i]==Vocab.ES]) # assert exactly one unmasked ES token
for i in range(seq_len):
ref_word = trg[i] if not xnmt.batcher.is_batched(src) \
else xnmt.batcher.mark_as_batch([single_trg[i] for single_trg in trg])
dec_state.context = translator.attender.calc_context(dec_state.rnn_state.output())
word_loss = translator.decoder.calc_loss(dec_state, ref_word)
if xnmt.batcher.is_batched(src) and trg_mask is not None:
word_loss = trg_mask.cmult_by_timestep_expr(word_loss, i, inverse=True)
losses.append(word_loss)
if i < seq_len-1:
dec_state = translator.decoder.add_input(dec_state, translator.trg_embedder.embed(ref_word))
return FactoredLossExpr({"mle": dy.esum(losses)})
class ReinforceLoss(Serializable, LossCalculator):
yaml_tag = '!ReinforceLoss'
  # REINFORCE (score-function) loss: samples an output, scores it with an evaluation
  # metric, and optionally subtracts a learned baseline to reduce variance.
@serializable_init
def __init__(self, evaluation_metric=None, sample_length=50, use_baseline=False,
inv_eval=True, decoder_hidden_dim=Ref("exp_global.default_layer_dim"), baseline=None):
self.use_baseline = use_baseline
self.inv_eval = inv_eval
if evaluation_metric is None:
self.evaluation_metric = xnmt.evaluator.FastBLEUEvaluator(ngram=4, smooth=1)
else:
self.evaluation_metric = evaluation_metric
if self.use_baseline:
self.baseline = self.add_serializable_component("baseline", baseline,
lambda: linear.Linear(input_dim=decoder_hidden_dim, output_dim=1))
def __call__(self, translator, initial_state, src, trg):
# TODO(philip30): currently only using the best hypothesis / first sample for reinforce loss
# A small further implementation is needed if we want to do reinforce with multiple samples.
search_output = translator.search_strategy.generate_output(translator, initial_state)[0]
# Calculate evaluation scores
self.eval_score = []
for trg_i, sample_i in zip(trg, search_output.word_ids):
# Removing EOS
sample_i = self.remove_eos(sample_i.tolist())
ref_i = self.remove_eos(trg_i.words)
# Evaluating
if len(sample_i) == 0:
score = 0
else:
score = self.evaluation_metric.evaluate(ref_i, sample_i) * \
(-1 if self.inv_eval else 1)
self.eval_score.append(score)
self.true_score = dy.inputTensor(self.eval_score, batched=True)
# Composing losses
loss = FactoredLossExpr()
if self.use_baseline:
baseline_loss = []
losses = []
for state, logsoft, mask in zip(search_output.state,
search_output.logsoftmaxes,
search_output.mask):
bs_score = self.baseline(state)
baseline_loss.append(dy.squared_distance(self.true_score, bs_score))
loss_i = dy.cmult(logsoft, self.true_score - bs_score)
losses.append(dy.cmult(loss_i, dy.inputTensor(mask, batched=True)))
loss.add_loss("reinforce", dy.sum_elems(dy.esum(losses)))
loss.add_loss("reinf_baseline", dy.sum_elems(dy.esum(baseline_loss)))
else:
loss.add_loss("reinforce", dy.sum_elems(dy.cmult(self.true_score, dy.esum(logsofts))))
return loss
class MinRiskLoss(Serializable, LossCalculator):
yaml_tag = '!MinRiskLoss'
@serializable_init
def __init__(self, evaluation_metric=None, alpha=0.005, inv_eval=True, unique_sample=True):
# Samples
self.alpha = alpha
if evaluation_metric is None:
self.evaluation_metric = xnmt.evaluator.FastBLEUEvaluator(ngram=4, smooth=1)
else:
self.evaluation_metric = evaluation_metric
self.inv_eval = inv_eval
self.unique_sample = unique_sample
def __call__(self, translator, initial_state, src, trg):
batch_size = len(trg)
uniques = [set() for _ in range(batch_size)]
deltas = []
probs = []
search_outputs = translator.search_strategy.generate_output(translator, initial_state, forced_trg_ids=trg)
for search_output in search_outputs:
logprob = search_output.logsoftmaxes
sample = search_output.word_ids
attentions = search_output.attentions
logprob = dy.esum(logprob) * self.alpha
# Calculate the evaluation score
eval_score = np.zeros(batch_size, dtype=float)
mask = np.zeros(batch_size, dtype=float)
for j in range(batch_size):
ref_j = self.remove_eos(trg[j].words)
hyp_j = self.remove_eos(sample[j].tolist())
if self.unique_sample:
hash_val = hash(tuple(hyp_j))
if len(hyp_j) == 0 or hash_val in uniques[j]:
mask[j] = -INFINITY
continue
else:
# Count this sample in
uniques[j].add(hash_val)
# Calc evaluation score
eval_score[j] = self.evaluation_metric.evaluate(ref_j, hyp_j) * \
(-1 if self.inv_eval else 1)
# Appending the delta and logprob of this sample
prob = logprob + dy.inputTensor(mask, batched=True)
deltas.append(dy.inputTensor(eval_score, batched=True))
probs.append(prob)
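    # Minimum-risk objective (clarifying note): risk = sum_s q(s) * Delta(s), where
    # q is a softmax over the alpha-scaled sample log-probabilities and Delta is the
    # (negated, if inv_eval) evaluation score of each sample.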
sample_prob = dy.softmax(dy.concatenate(probs))
deltas = dy.concatenate(deltas)
risk = dy.sum_elems(dy.cmult(sample_prob, deltas))
### Debug
#print(sample_prob.npvalue().transpose()[0])
#print(deltas.npvalue().transpose()[0])
#print("----------------------")
### End debug
return FactoredLossExpr({"risk": risk})
|
the-stack_0_8358 | import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow_probability import distributions
from tensorflow.python import keras
from tensorflow.python.keras.engine.network import Network
class QFunction(Network):
def __init__(self, hidden_layer_sizes, **kwargs):
super(QFunction, self).__init__(**kwargs)
self._hidden_layer_sizes = hidden_layer_sizes
def build(self, input_shape):
inputs = [
layers.Input(batch_shape=input_shape[0], name='observations'),
layers.Input(batch_shape=input_shape[1], name='actions')
]
x = layers.Concatenate(axis=1)(inputs)
for hidden_units in self._hidden_layer_sizes:
x = layers.Dense(hidden_units, activation='relu')(x)
q_values = layers.Dense(1, activation=None)(x)
self._init_graph_network(inputs, q_values)
super(QFunction, self).build(input_shape)
class ValueFunction(Network):
def __init__(self, hidden_layer_sizes, **kwargs):
super(ValueFunction, self).__init__(**kwargs)
self._hidden_layer_sizes = hidden_layer_sizes
def build(self, input_shape):
inputs = layers.Input(batch_shape=input_shape, name='observations')
x = inputs
for hidden_units in self._hidden_layer_sizes:
x = layers.Dense(hidden_units, activation='relu')(x)
values = layers.Dense(1, activation=None)(x)
self._init_graph_network(inputs, values)
super(ValueFunction, self).build(input_shape)
class GaussianPolicy(Network):
def __init__(self, action_dim, hidden_layer_sizes, reparameterize, **kwargs):
super(GaussianPolicy, self).__init__(**kwargs)
self._action_dim = action_dim
self._f = None
self._hidden_layer_sizes = hidden_layer_sizes
self._reparameterize = reparameterize
def build(self, input_shape):
inputs = layers.Input(batch_shape=input_shape, name='observations')
x = inputs
for hidden_units in self._hidden_layer_sizes:
x = layers.Dense(hidden_units, activation='relu')(x)
mean_and_log_std = layers.Dense(
self._action_dim * 2, activation=None)(x)
def create_distribution_layer(mean_and_log_std):
mean, log_std = tf.split(
mean_and_log_std, num_or_size_splits=2, axis=1)
log_std = tf.clip_by_value(log_std, -20., 2.)
distribution = distributions.MultivariateNormalDiag(
loc=mean,
scale_diag=tf.exp(log_std))
raw_actions = distribution.sample()
if not self._reparameterize:
raw_actions = tf.stop_gradient(raw_actions)
log_probs = distribution.log_prob(raw_actions)
log_probs -= self._squash_correction(raw_actions)
### Problem 2.A
### YOUR CODE HERE
actions = tf.tanh(raw_actions)
#actions = raw_actions
return actions, log_probs
samples, log_probs = layers.Lambda(create_distribution_layer)(
mean_and_log_std)
self._init_graph_network(inputs=inputs, outputs=[samples, log_probs])
super(GaussianPolicy, self).build(input_shape)
def _squash_correction(self, raw_actions):
# https://github.com/haarnoja/sac/blob/master/sac/policies/gaussian_policy.py
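        # Note (not in the original code): an algebraically equivalent and more
        # numerically stable form of this correction is
        #   2 * (log(2) - raw_actions - softplus(-2 * raw_actions)), summed over axis 1.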
return tf.reduce_sum(tf.log(1 - tf.tanh(raw_actions) ** 2 + 1e-6), axis=1)
def eval(self, observation):
assert self.built and observation.ndim == 1
if self._f is None:
self._f = keras.backend.function(self.inputs, [self.outputs[0]])
action, = self._f([observation[None]])
return action.flatten()
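# A minimal build/usage sketch (not part of the original module); the observation
# dimension (8), action dimension (2) and hidden sizes are arbitrary assumptions,
# and this assumes the TF1.x-style Keras Network setup used above.
if __name__ == "__main__":
    q_fn = QFunction(hidden_layer_sizes=(64, 64))
    q_fn.build(input_shape=[(None, 8), (None, 2)])
    v_fn = ValueFunction(hidden_layer_sizes=(64, 64))
    v_fn.build(input_shape=(None, 8))
    policy = GaussianPolicy(action_dim=2, hidden_layer_sizes=(64, 64), reparameterize=True)
    policy.build(input_shape=(None, 8))
    q_fn.summary()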
|
the-stack_0_8359 | # coding: utf-8
import argparse
import time
import math
import os
import sys
sys.path.append(os.getcwd()) # Fix Python Path
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import joblib
from tqdm import tqdm
import synth_model
from tbptt import (
TBPTT_minibatch_helper,
generate_repeating_sequence,
generate_copy_sequence,
plot_repeating_sequence,
plot_copy_sequence,
)
from tbptt.adaptive_truncation import (
calculate_gradient_norms,
calculate_component_gradient_norms,
adaptive_K_estimate,
log_estimate_grad_norm,
)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
# Modified from pytorch/examples/word_language_model on Github
parser = argparse.ArgumentParser(description='PyTorch Synthetic RNN Model')
# I/O args
parser.add_argument("--experiment_id", type=int, default=-1)
parser.add_argument("--experiment_folder", type=str, default='test')
# Data Args
parser.add_argument("--data_name", type=str, default='./data/test',
help="name of dataset to save/load data")
parser.add_argument("--data_type", type=str, default='copy',
help="{'repeat', 'copy'}")
parser.add_argument("--data_lag", type=int, default=10,
help="data 'lag' or memory parameter",
)
parser.add_argument("--data_minlag", type=int, default=5,
help="data minimum 'lag' or memory parameter (for COPY)",
)
parser.add_argument('--emsize', type=int, default=6,
help="dimension of inputs and outputs",
)
# Model Args
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--nhid', type=int, default=20,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=2,
help='number of layers')
# Training Args
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--optim', type=str, default='SGD',
help='optimizer {SGD (default), ADADELTA, Momentum}')
parser.add_argument('--scale_lr_K', action='store_true',
help='scale learning rate by K')
parser.add_argument('--decay_lr', action='store_true',
help='decay learning rate by 1/sqrt(epoch)')
parser.add_argument('--weight_decay', type=float, default=10**-6,
help='L2 regularization')
parser.add_argument('--clip_grad', type=float, default=1.0,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=210,
help='upper epoch limit')
parser.add_argument('--max_train_time', type=float, default=3*3600,
help='max training time')
parser.add_argument('--batch_size', type=int, default=64,
help='batch size')
parser.add_argument("--tbptt_style", type=str, default='original_buffer',
help="One of (tf, buffer, original, original-buffer, double-tf)")
parser.add_argument('--K', type=int, default=50,
help='TBPTT sequence length')
parser.add_argument('--adaptive_K', action='store_true',
help='use adaptive K')
parser.add_argument('--delta', type=float, default=0.1,
help='adaptive_K relative bias parameter')
parser.add_argument("--beta_estimate_method", type=str, default=None,
help="{'max', 'ols', 'quantile'}")
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--seed', type=int, default=None,
help='random seed')
parser.add_argument('--init_num', type=int, default=0,
help='initial parameters')
parser.add_argument("--init_path", type=str, default=None,
help='init_num path (default is experiment_folder/out/)')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
args = parser.parse_args()
print(args)
# Additional Training CONSTs
TRAIN_SEQ_LEN = 4000
TEST_SEQ_LEN = 1000
TP_SEQ_LEN = 500
MAX_TRAIN_STEPS = 600
MAX_TIME_PER_STEP = 20
MAX_STEPS_PER_STEP = 10
NUM_K_EST_REPS = 20
TAUS_RANGE = np.arange(60, 91, 10)
MIN_K = 2
MAX_K = 100
CHECK_FREQ = 30
GRAD_CHECK_FREQ = 30
GRAD_CHECK_SEQ_LEN = 100
K_EST_SEQ_LEN = 100
# Set the random seed manually for reproducibility.
if args.seed is None:
torch.manual_seed(args.experiment_id)
else:
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
# Setup Paths for Figures + Output
method_name = '{0}_{1}_{2}_{3}_{4}'.format(
args.experiment_id,
args.model,
args.K if not args.adaptive_K else 'adaptive{0}'.format(args.delta),
args.tbptt_style,
args.lr,
)
path_to_out = os.path.join(args.experiment_folder, 'out', method_name)
if not os.path.isdir(path_to_out):
os.makedirs(path_to_out)
joblib.dump(pd.DataFrame([vars(args)]), os.path.join(path_to_out, 'options.p'))
if args.init_path is None:
path_to_init_param_dict = os.path.join(args.experiment_folder, 'out',
"init_{0}.state_dict.pth".format(args.init_num))
else:
path_to_init_param_dict = os.path.join(args.init_path,
"init_{0}.state_dict.pth".format(args.init_num))
path_to_check_param_dict = os.path.join(path_to_out,"checkpoint_state_dict.pth")
###############################################################################
# Load/Generate data
###############################################################################
print("Loading/Generate Data")
path_to_data_folder = os.path.join(args.data_name)
if not os.path.isdir(path_to_data_folder):
os.makedirs(path_to_data_folder)
path_to_data = os.path.join(path_to_data_folder,
'{0}_{1}_{2}.p.gz'.format(args.data_type, args.data_lag, args.emsize))
if os.path.isfile(path_to_data):
print("Loading Data from {0}".format(path_to_data))
data = joblib.load(path_to_data)
else:
print("Generating Data")
if args.data_type == 'repeat':
train_data = generate_repeating_sequence(
seq_len = TRAIN_SEQ_LEN,
batch_size = args.batch_size,
input_size = args.emsize,
output_size = args.emsize,
lag = args.data_lag,
base_seq_length = TRAIN_SEQ_LEN,
)
        valid_data = generate_repeating_sequence(
            seq_len = TEST_SEQ_LEN,
batch_size = args.batch_size,
input_size = args.emsize,
output_size = args.emsize,
lag = args.data_lag,
base_seq_length = TEST_SEQ_LEN,
)
test_data = generate_repeating_sequence(
seq_len = TEST_SEQ_LEN,
batch_size = args.batch_size,
input_size = args.emsize,
output_size = args.emsize,
lag = args.data_lag,
base_seq_length = TEST_SEQ_LEN,
)
elif args.data_type == 'copy':
train_data = generate_copy_sequence(
seq_len = TRAIN_SEQ_LEN,
batch_size = args.batch_size,
num_states = args.emsize,
lag = args.data_lag,
min_lag = args.data_minlag,
)
valid_data = generate_copy_sequence(
seq_len = TEST_SEQ_LEN,
batch_size = args.batch_size,
num_states = args.emsize,
lag = args.data_lag,
min_lag = args.data_minlag,
)
test_data = generate_copy_sequence(
seq_len = TEST_SEQ_LEN,
batch_size = args.batch_size,
num_states = args.emsize,
lag = args.data_lag,
min_lag = args.data_minlag,
)
else:
raise ValueError("Unrecognized 'data_type' {0}".format(
args.data_type))
train_X = torch.tensor(train_data['input_seq'], dtype=torch.float32)
train_Y = torch.tensor(train_data['output_seq'], dtype=torch.long)
valid_X = torch.tensor(valid_data['input_seq'], dtype=torch.float32)
valid_Y = torch.tensor(valid_data['output_seq'], dtype=torch.long)
test_X = torch.tensor(test_data['input_seq'], dtype=torch.float32)
test_Y = torch.tensor(test_data['output_seq'], dtype=torch.long)
data = dict(train_X=train_X, train_Y=train_Y,
valid_X=valid_X, valid_Y=valid_Y,
test_X=test_X, test_Y=test_Y,
)
print("Saving Data to {0}".format(path_to_data))
joblib.dump(data, path_to_data)
# Move data to device
for key in ['train_X', 'train_Y', 'valid_X', 'valid_Y', 'test_X', 'test_Y']:
data[key] = data[key].to(device)
train_data = dict(X = data['train_X'], Y = data['train_Y'])
valid_data = dict(X = data['valid_X'], Y = data['valid_Y'])
test_data = dict(X = data['test_X'], Y = data['test_Y'])
###############################################################################
# Build the model
###############################################################################
print("Setting Up Model Module")
rnn_module = synth_model.RNNModel(args.model, args.emsize, args.emsize, args.nhid, args.nlayers, args.dropout).to(device)
# Copy Initialization between different TBPTT(K) methods
if os.path.isfile(path_to_check_param_dict):
print("Loading Module Parameters from Checkpoing {0}".format(
path_to_check_param_dict))
rnn_module.load_state_dict(torch.load(path_to_check_param_dict))
if os.path.isfile(os.path.join(path_to_out, 'metrics.p.gz')):
metric_df = [joblib.load(os.path.join(path_to_out, 'metrics.p.gz'))]
else:
metric_df = []
elif os.path.isfile(path_to_init_param_dict):
print("Loading Module Init Parameters from {0}".format(
path_to_init_param_dict))
rnn_module.load_state_dict(torch.load(path_to_init_param_dict,
map_location=None if args.cuda else 'cpu',
))
metric_df = []
else:
print("Saving Module Init Parameters to {0}".format(
path_to_init_param_dict))
torch.save(rnn_module.state_dict(), path_to_init_param_dict)
metric_df = []
# Loss + Optimizer
if args.optim == 'SGD':
optimizer = torch.optim.SGD(
rnn_module.parameters(),
lr = args.lr,
weight_decay = args.weight_decay,
)
elif args.optim == 'ADADELTA':
optimizer = torch.optim.Adadelta(
rnn_module.parameters(),
lr = args.lr,
weight_decay = args.weight_decay,
)
elif args.optim == 'Momentum':
optimizer = torch.optim.SGD(
rnn_module.parameters(),
lr = args.lr,
weight_decay = args.weight_decay,
momentum = 0.9,
)
else:
raise ValueError("Unrecognized optim: {0}".format(args.optim))
loss_module = nn.CrossEntropyLoss()
# Minibatch Helper (calls train() + eval())
runner = TBPTT_minibatch_helper(
rnn_module=rnn_module,
loss_module=loss_module,
optimizer=optimizer,
use_buffer=(args.tbptt_style == 'buffer'),
original_style=(args.tbptt_style == 'original'),
original_buffer=(args.tbptt_style == 'original-buffer'),
double_tf=(args.tbptt_style == 'double-tf'),
cuda=args.cuda,
normed=True,
)
###############################################################################
# Training code
###############################################################################
print("Training Loop")
# Helper Functions
def get_batch(source, start, seq_len):
seq_len = min(seq_len, source['X'].shape[0] - start)
X = source['X'][start:start+seq_len]
Y = source['Y'][start:start+seq_len]
return X, Y
def stochastic_subset(source, seq_len):
""" Subsample seq_len uniformly for each element along dim 1 """
shifts=np.random.randint(0, source['X'].shape[0]-seq_len, size=source['X'].shape[1])
subset_X = torch.cat([source['X'][shift:shift+seq_len,ii:ii+1]
for (ii, shift) in enumerate(shifts)], dim=1)
subset_Y = torch.cat([source['Y'][shift:shift+seq_len,ii:ii+1]
for (ii, shift) in enumerate(shifts)], dim=1)
return subset_X, subset_Y
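# Example (illustrative): with the defaults above, source['X'] has shape
# (seq_len=4000, batch=64, features=6); stochastic_subset(source, 150) returns
# (150, 64, 6) tensors where each batch column starts at its own random offset.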
def beta_est_method_to_params(beta_estimate_method):
""" One of the following
'max' or 'ols'
"""
beta_est_kwargs = {
'max': dict(
beta_estimate_method='max',
est_after_log=False,
),
'max-post': dict(
beta_estimate_method='max',
est_after_log=True,
),
'ols': dict(
beta_estimate_method='ols',
est_after_log=False,
),
'ols-post': dict(
beta_estimate_method='ols',
est_after_log=True,
),
'quantile': dict(
beta_estimate_method='quantile',
est_after_log=False,
),
}
if beta_estimate_method not in beta_est_kwargs:
raise ValueError("Unrecognized beta_estimate_method")
return beta_est_kwargs[beta_estimate_method]
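# e.g. beta_est_method_to_params('ols') -> {'beta_estimate_method': 'ols', 'est_after_log': False}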
def adaptive_K_est(source, runner,
seq_len, burnin,
tau, deltas=[],
beta_estimate_methods=None,
W=None, trun_range=None):
X, Y = stochastic_subset(source, seq_len=seq_len+burnin)
grad_norm, cum_grad_norm = calculate_gradient_norms(
X=X[burnin:],
Y=Y[burnin:],
init_state = runner.rnn_module.get_burnin_init(X[:burnin]),
rnn_module = runner.rnn_module,
loss_module = runner.loss_module,
#tqdm=tqdm,
)
out = dict()
if beta_estimate_methods is None:
beta_estimate_methods = ['ols']
for beta_estimate_method in beta_estimate_methods:
beta_kwargs = beta_est_method_to_params(beta_estimate_method)
K_est = adaptive_K_estimate(grad_norm, cum_grad_norm, tau,
deltas=deltas, **beta_kwargs)
for delta, K in zip(deltas, K_est):
out['{0}_{1}'.format(beta_estimate_method, delta)] = K
return out
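# adaptive_K_est returns a dict keyed by '<beta_method>_<delta>', e.g.
# {'ols_0.1': 23, 'ols_0.5': 11} (illustrative values only).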
def calc_metric_estimate(source, runner,
cur_K,
seq_len, burnin,
tau, deltas=[],
beta_estimate_method=None,
W=None, trun_range=None):
X, Y = stochastic_subset(source, seq_len=seq_len+burnin)
grad_norm, cum_grad_norm = calculate_gradient_norms(
X=X[burnin:],
Y=Y[burnin:],
init_state = runner.rnn_module.get_burnin_init(X[:burnin]),
rnn_module = runner.rnn_module,
loss_module = runner.loss_module,
#tqdm=tqdm,
)
out = dict()
if beta_estimate_method is None:
beta_estimate_methods = ['ols']
else:
beta_estimate_methods = [beta_estimate_method]
for beta_estimate_method in beta_estimate_methods:
beta_kwargs = beta_est_method_to_params(beta_estimate_method)
extra_out = adaptive_K_estimate(grad_norm, cum_grad_norm, tau,
deltas=deltas, extra_out=True, **beta_kwargs)
# K estimates
for delta, K in zip(deltas, extra_out['K_est']):
out['K_{0}'.format(delta)] = K
out['cur_logdelta'] = extra_out['log_rel_error'][cur_K-1]
return out
def checkpoint_grad_plots(runner, source, path_to_figures,
taus = TAUS_RANGE, seq_len=GRAD_CHECK_SEQ_LEN,
start=0, burnin=50):
plt.close('all')
train_X, train_Y = get_batch(
source=source, start=start, seq_len=seq_len+2*burnin,
)
train_init_state = runner.rnn_module.get_burnin_init(train_X[:burnin])
# Component-wise Grad Norm Plots
grad_norms, cum_grad_norms, component_names = calculate_component_gradient_norms(
X=train_X[burnin:],
Y=train_Y[burnin:],
init_state = train_init_state,
rnn_module = runner.rnn_module,
loss_module = runner.loss_module,
#tqdm=tqdm,
)
# Save Grads
joblib.dump(dict(
grad_norms=grad_norms,
cum_grad_norms=cum_grad_norms,
component_names=component_names,
),
os.path.join(path_to_figures, 'grads.p'),
compress=True,
)
# for grad_norm, cum_grad_norm, key in tqdm(
# zip(grad_norms, cum_grad_norms, component_names),
# desc='grad norm plots',
# total=len(component_names)):
for grad_norm, cum_grad_norm, key in zip(grad_norms, cum_grad_norms, component_names):
path_to_grad_norm = os.path.join(path_to_figures, 'grad_{0}'.format(key))
if not os.path.isdir(path_to_grad_norm):
os.makedirs(path_to_grad_norm)
log_grad_norms = np.log(grad_norm + 1e-100)
log_grad_norms[log_grad_norms < -50] = np.NaN
est_abs_bias = np.abs(np.cumsum(grad_norm, axis=0)-np.sum(grad_norm, axis=0))
est_rel_bias = est_abs_bias/(np.cumsum(grad_norm, axis=0)-est_abs_bias)
est_rel_bias[est_rel_bias < 0] = np.nan
# Plot Grad Norms + Log Grad Norm Diff
fig, axes = plt.subplots(2,1)
axes[0].plot(np.diff(log_grad_norms, axis=0)[:-burnin],
color='gray', alpha=0.2)
axes[0].axhline(y=0, linestyle="--", color='black')
axes[0].plot(np.nanmedian(np.diff(log_grad_norms, axis=0), axis=1)[:-burnin],
color='C0')
axes[0].plot(np.nanmean(np.diff(log_grad_norms, axis=0), axis=1)[:-burnin],
color='C1', linestyle="--", alpha=0.9)
axes[0].set_ylabel("Log Grad Norm Diff")
axes[0].set_xlabel("Lag")
axes[1].plot(grad_norm[:-burnin], color='gray', alpha=0.2)
axes[1].plot(np.median(grad_norm, axis=1)[:-burnin], color='C0')
axes[1].plot(np.mean(grad_norm, axis=1)[:-burnin], color='C1',
linestyle='--', alpha=0.9)
axes[1].plot(np.diff(cum_grad_norm)[:-burnin], color='C2')
axes[1].fill_between(x=np.arange(seq_len),
y1 = np.quantile(grad_norm, 0.05, axis=1)[:-burnin],
y2 = np.quantile(grad_norm, 0.95, axis=1)[:-burnin],
color='C0', alpha=0.2)
axes[1].set_yscale('log')
axes[1].set_ylabel("Grad Norm")
axes[1].set_xlabel("Lag")
fig.savefig(os.path.join(path_to_grad_norm, 'grad_plot_{0}.png'.format(key)))
# Estimates of Relative Error
beta_estimate_methods = ['ols']
beta_trun_abs_bias = {}
beta_trun_rel_bias = {}
beta_log_grad_norm = {}
for beta_estimate_method in beta_estimate_methods:
plt.close('all')
beta_kwargs = beta_est_method_to_params(beta_estimate_method)
extra_outs = [
adaptive_K_estimate(grad_norm, cum_grad_norm,
tau=tau,
deltas=[0.1],
trun_range=np.arange(seq_len)+1,
extra_out=True,
**beta_kwargs
)
for tau in taus]
log_est_grad_norm = np.array([log_estimate_grad_norm(
grad_norm=grad_norm,
logbeta = out_['logbeta'],
tau=tau,
trun_range=np.arange(seq_len)+1,
**beta_kwargs
)
for out_, tau in zip(extra_outs, taus)])
log_est_min = np.min(log_est_grad_norm, axis=0)
beta_log_grad_norm[beta_estimate_method] = log_est_min
trun_rel_bias = np.array([np.exp(out_['log_rel_error'])
for out_ in extra_outs])
trun_rel_bias_min = np.min(trun_rel_bias, axis=0)
beta_trun_rel_bias[beta_estimate_method] = trun_rel_bias_min
trun_abs_bias = np.array([
np.exp(out_['log_abs_error'])
for out_ in extra_outs])
trun_abs_bias_min = np.min(trun_abs_bias, axis=0)
beta_trun_abs_bias[beta_estimate_method] = trun_abs_bias_min
# Plot Rel Bias for each beta estimate method
fig, axes = plt.subplots(4,1)
est_rel_bias = 1-np.cumsum(grad_norm, axis=0)/np.sum(grad_norm, axis=0)
est_abs_bias = np.abs(np.cumsum(grad_norm, axis=0)-np.sum(grad_norm, axis=0))
axes[0].plot(np.diff(log_grad_norms, axis=0)[:-burnin], color='gray', alpha=0.1)
for tau, log_est_norm in zip(taus, log_est_grad_norm):
axes[0].plot(np.arange(tau-10+1, len(log_est_norm)),
log_est_norm[tau-10+1:] - log_est_norm[tau-10:-1],
label='tau={0}'.format(tau), linestyle='-', alpha=0.95)
axes[0].legend()
axes[0].axhline(y=0, linestyle="--", color='black')
axes[0].set_ylabel("Log Grad Norm Diff Est")
axes[0].set_xlabel("Lag")
axes[1].plot(np.arange(1,seq_len+1), grad_norm[:-burnin], color='gray', alpha=0.1)
axes[1].plot(np.arange(1,len(log_est_min)+1), np.exp(log_est_min), label='best', color='k', linewidth=3)
for tau, log_est_norm in zip(taus, log_est_grad_norm):
axes[1].plot(np.arange(1, len(log_est_norm)+1),
np.exp(log_est_norm),
label='tau={0}'.format(tau), linestyle='--', alpha=0.7)
axes[1].legend()
axes[1].set_ylabel("Grad Norm Est")
axes[1].set_xlabel("Lag")
axes[1].set_yscale('log')
axes[2].plot(np.arange(1,seq_len+1), est_abs_bias[:-burnin], color='gray', alpha=0.1)
axes[2].plot(np.arange(1,len(trun_abs_bias_min)+1), trun_abs_bias_min, label='best', color='k', linewidth=3)
for tau, trun_abs_bias_ in zip(taus, trun_abs_bias):
axes[2].plot(np.arange(1, len(trun_abs_bias_)+1), trun_abs_bias_,
label='tau={0}'.format(tau), linestyle='--', alpha=0.7)
axes[2].legend()
axes[2].set_ylabel("Est Abs Bias")
axes[2].set_xlabel("Truncation Length")
axes[2].set_yscale('log')
axes[3].plot(np.arange(1,seq_len+1), est_rel_bias[:-burnin], color='gray', alpha=0.1)
axes[3].plot(np.arange(1,len(trun_rel_bias_min)+1),
trun_rel_bias_min, label='best', color='k', linewidth=3)
for tau, trun_rel_bias_ in zip(taus, trun_rel_bias):
axes[3].plot(np.arange(1, len(trun_rel_bias_)+1), trun_rel_bias_,
label='tau={0}'.format(tau), linestyle='--', alpha=0.7)
axes[3].legend()
axes[3].set_ylabel("Est Rel Bias")
axes[3].set_xlabel("Truncation Length")
axes[3].set_yscale('log')
fig.suptitle("Beta Est Method: {0}".format(beta_estimate_method))
fig.set_size_inches(8,14)
fig.savefig(os.path.join(path_to_grad_norm, '{0}.png'.format(
beta_estimate_method)))
# Estimate Relative Error + Gradient Norm for all beta methods
plt.close('all')
fig, axes = plt.subplots(3,1)
axes[0].plot(np.arange(1,seq_len+1), grad_norm[:-burnin], color='gray', alpha=0.1)
for ii, (beta_estimate_method, varphi_hat) in enumerate(beta_log_grad_norm.items()):
axes[0].plot(np.arange(1, len(varphi_hat)+1),
np.exp(varphi_hat),
label='{0}'.format(beta_estimate_method), linestyle='--', alpha=0.7)
axes[0].legend()
axes[0].set_ylabel("Grad Norm Est")
axes[0].set_xlabel("Lag")
axes[0].set_yscale('log')
axes[1].plot(np.arange(1,seq_len+1), est_abs_bias[:-burnin], color='gray', alpha=0.1)
for ii, (beta_estimate_method, trun_bias_) in enumerate(beta_trun_abs_bias.items()):
axes[1].plot(np.arange(1, len(trun_bias_)+1), trun_bias_,
label='{0}'.format(beta_estimate_method), linestyle='--', alpha=0.7)
axes[1].legend()
axes[1].set_ylabel("Est Abs Bias")
axes[1].set_xlabel("Truncation Length")
axes[1].set_yscale('log')
axes[2].plot(np.arange(1,seq_len+1), est_rel_bias[:-burnin], color='gray', alpha=0.1)
for ii, (beta_estimate_method, trun_bias_) in enumerate(beta_trun_rel_bias.items()):
axes[2].plot(np.arange(1, len(trun_bias_)+1), trun_bias_,
label='{0}'.format(beta_estimate_method), linestyle='--', alpha=0.7)
axes[2].legend()
axes[2].set_ylabel("Est Rel Bias")
axes[2].set_xlabel("Truncation Length")
axes[2].set_yscale('log')
fig.set_size_inches(8,12)
fig.savefig(os.path.join(path_to_grad_norm,
'grad_norm_frac_plot_{0}.png'.format(key)))
plt.close('all')
return
def checkpoint_metric_plots(metric_df, path_to_figures, path_to_out):
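    """Persist the accumulated metrics to ``metrics.p.gz`` and save per-metric line plots."""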
metric_df = pd.concat(metric_df, ignore_index=True)
joblib.dump(metric_df, os.path.join(path_to_out, 'metrics.p.gz'))
plt.close('all')
df = metric_df.query('epoch > 1')
df = df[~df['metric'].str.contains('hidden|cell')] # filter out var specific K
if df.shape[0] > 0:
g = sns.FacetGrid(col='metric', col_wrap=4, data=df, sharey=False)
g.map_dataframe(sns.lineplot, x='epoch', y='value',
estimator='mean', ci='sd')
g.fig.savefig(os.path.join(path_to_figures, 'metrics.png'))
for ax in g.axes.flatten():
ax.set_xscale('log')
g.fig.savefig(os.path.join(path_to_figures, 'metrics_logx.png'))
plt.close('all')
return
def checkpoint_fit_plots(runner, data, path_to_figures, plot_sequence):
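    """Plot model predictions against the target sequence for short train and test windows."""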
seq_len, burnin = 50, 10
train_X, train_Y = data['train_X'], data['train_Y']
test_X, test_Y = data['test_X'], data['test_Y']
plt.close('all')
train_Yhat = runner.predict(train_X[0:seq_len],
runner.rnn_module.get_default_init(train_X.shape[1]))
fig, ax = plot_sequence(train_Yhat[burnin:, 0], train_Y[burnin:seq_len,0].cpu().numpy())
fig.savefig(os.path.join(path_to_figures, "model_fit_train.png"))
test_Yhat = runner.predict(test_X[0:seq_len],
runner.rnn_module.get_default_init(train_X.shape[1]))
fig, ax = plot_sequence(test_Yhat[burnin:,0], test_Y[burnin:seq_len,0].cpu().numpy())
fig.savefig(os.path.join(path_to_figures, "model_fit_test.png"))
plt.close('all')
return
# Loop Setup
pbar = tqdm(range(MAX_TRAIN_STEPS))
exit_flag = False # Whether to exit
cur_train_time = 0.0 # Train Time Elapsed
if len(metric_df) == 0:
epoch = 0 # Epoch used in training
adaptive_epoch = 0 # Epoch used in training + adaptive K estimation
K = args.K # BPTT size
else:
epoch = metric_df[0].iloc[-1]['epoch']
adaptive_epoch = metric_df[0].iloc[-1]['adaptive_epoch'] - epoch
K = int(metric_df[0].query("metric == 'cur_K'").iloc[-1]['value'])
TP_SEQ_LEN = min([TP_SEQ_LEN, train_data['X'].shape[0]])
cyclic_init = rnn_module.get_default_init(args.batch_size)
cyclic_index = 0
Khat = K # Mean Estimate for cur_K
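# Relative-error tolerances for the truncation-length estimates: each 'K_delta' metric
# is the adaptive estimate of how many backprop steps are needed to keep the estimated
# relative gradient bias below delta.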
deltas = [1.0, 0.9, 0.5, 0.1]
if args.adaptive_K:
if args.delta not in deltas:
deltas.append(args.delta)
valid_X, valid_Y = get_batch(valid_data, 0, valid_data['X'].shape[0])
test_X, test_Y = get_batch(test_data, 0, test_data['X'].shape[0])
# Loop
for step in pbar:
# Scale LR
lr = args.lr
if args.scale_lr_K or args.decay_lr:
if args.decay_lr:
lr = lr/np.sqrt(step+1)
if args.scale_lr_K:
lr = lr*np.sqrt(K)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Training Steps
start_time = time.time()
if step > 0:
step_pbar = range(MAX_STEPS_PER_STEP)
for step_step in step_pbar:
# One Step of Optimizer
if cyclic_index > train_data['X'].shape[0]:
print(" Epoch {0} Cycle Complete ".format(int(epoch)))
cyclic_init = rnn_module.get_default_init(args.batch_size)
cyclic_index = 0
tp_seq_len = (TP_SEQ_LEN//K)*K
partial_X, partial_Y = get_batch(train_data,
start=cyclic_index, seq_len=tp_seq_len)
cyclic_index += tp_seq_len
streaming_train_loss, cyclic_init = runner.train(
partial_X, partial_Y, cyclic_init,
K=K,
hidden_out=True,
clip_grad=args.clip_grad,
#tqdm=tqdm,
)
epoch += partial_X.shape[0]/(train_data['X'].shape[0])
if time.time() - start_time > MAX_TIME_PER_STEP:
break
cur_train_time += time.time() - start_time
# Compute Metrics
valid_loss = runner.test(valid_X, valid_Y,
rnn_module.get_default_init(valid_X.shape[1]),
K=100,
#tqdm=tqdm,
)/valid_Y.size(0)
test_loss = runner.test(test_X, test_Y,
rnn_module.get_default_init(test_X.shape[1]),
K=100,
#tqdm=tqdm,
)/test_Y.size(0)
metric_ests = [None] * NUM_K_EST_REPS
for est_rep in range(NUM_K_EST_REPS):
#tqdm(range(NUM_K_EST_REPS), desc="K_est_rep"):
metric_ests[est_rep] = calc_metric_estimate(train_data, runner,
cur_K=int(np.round(Khat)),
seq_len=K_EST_SEQ_LEN, burnin=20, tau=K_EST_SEQ_LEN-20,
deltas=deltas,
)
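    # K estimate at relative-error tolerance 0.5, averaged over the estimation repetitions
    # (reported as K_0.5 in the progress bar below)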
relK = int(np.round(np.mean([metric_est['K_0.5'] for metric_est in metric_ests])))
if args.adaptive_K:
Khat = np.mean([metric_est['K_{0}'.format(args.delta)] for metric_est in metric_ests])
pbar.set_description(
"Epoch: {0:2.1f}, Valid Loss: {1:4.4f}, Test Loss: {2:4.4f}, Test PPL: {3:4.4f}, Cur K: {4:2d}, Khat: {5:3.2f}, K_0.5: {6:2d}, Train Time: {7:4.2f}, LR: {8:4.2f}".format(
epoch, valid_loss, test_loss, np.exp(test_loss), K, Khat, relK, cur_train_time, lr,
))
metric = [
dict(metric = 'valid_log_ppl', value = valid_loss),
dict(metric = 'test_log_loss', value = test_loss),
dict(metric = 'valid_ppl', value = np.exp(valid_loss)),
dict(metric = 'test_ppl', value = np.exp(test_loss)),
dict(metric = 'cur_K', value = K),
dict(metric = 'Khat', value = Khat),
] + [
dict(metric = key, value=value, rep=rep)
for rep, metric_est in enumerate(metric_ests)
for key, value in metric_est.items()
]
metric = pd.DataFrame(metric).fillna(0)
metric['adaptive_epoch'] = adaptive_epoch + epoch
metric['epoch'] = epoch
metric['time'] = cur_train_time
metric_df.append(metric)
if (cur_train_time > args.max_train_time) or (epoch > args.epochs):
exit_flag = True
# Checkpoints
if (step % CHECK_FREQ == 0) or exit_flag:
epoch_str = str(int(epoch*10)/10)
path_to_figures = os.path.join(args.experiment_folder, 'figures',
method_name, epoch_str)
if not os.path.isdir(path_to_figures):
os.makedirs(path_to_figures)
print("Quick Checkpoint Epoch:{0} ".format(epoch_str))
checkpoint_metric_plots(metric_df, path_to_figures, path_to_out)
plot_sequence = plot_copy_sequence if args.data_type == 'copy' else plot_repeating_sequence
checkpoint_fit_plots(runner, data, path_to_figures, plot_sequence)
if (step % GRAD_CHECK_FREQ == 0) or exit_flag:
checkpoint_grad_plots(runner, train_data, path_to_figures)
# Exit Early (if max time or epoch exceeded)
if exit_flag:
print("Saving Module Parameters to {0}".format(
path_to_check_param_dict))
torch.save(rnn_module.state_dict(), path_to_check_param_dict)
break
# Update Truncation Size (if adaptive)
if args.adaptive_K and step > 0:
start_time = time.time()
seq_len = K_EST_SEQ_LEN
tau = seq_len-20
burnin = 20
K_ests = adaptive_K_est(train_data,
runner,
seq_len=seq_len,
burnin=burnin,
tau=tau,
deltas=[args.delta],
beta_estimate_methods=[args.beta_estimate_method],
)
K = np.max([MIN_K, np.max(list(K_ests.values()))])
K = np.min([K, MAX_K])
adaptive_epoch += (seq_len+burnin)/(train_data['X'].shape[0])
cur_train_time += time.time() - start_time
print("... Done")
# EOF
|
the-stack_0_8362 | import os
import sentry_sdk
from pytest_mock import MockerFixture
from pdf_service import apply_sentry_tags
def test_adds_sentry_tag(mocker: MockerFixture):
mocker.patch("os.environ.items")
mocker.patch("sentry_sdk.set_tag")
os.environ.items.return_value = [('SENTRY_TAG_TEST', 'abc'), ('OTHER_VAR', 'unrelated')]
apply_sentry_tags()
sentry_sdk.set_tag.assert_called_once_with("test", "abc")
def test_adds_sentry_tag_with_multiple_parts(mocker: MockerFixture):
mocker.patch("os.environ.items")
mocker.patch("sentry_sdk.set_tag")
os.environ.items.return_value = [('SENTRY_TAG_LONGER_VALUE', 'the-value')]
apply_sentry_tags()
sentry_sdk.set_tag.assert_called_once_with("longer_value", "the-value")
def test_adds_sentry_tags(mocker: MockerFixture):
mocker.patch("os.environ.items")
mocker.patch("sentry_sdk.set_tag")
os.environ.items.return_value = [('SENTRY_TAG_VALUE_A', 'a'), ('SENTRY_TAG_VALUE_B', 'B')]
apply_sentry_tags()
sentry_sdk.set_tag.assert_any_call("value_a", "a")
sentry_sdk.set_tag.assert_any_call("value_b", "B")
|
the-stack_0_8364 |
class FakeDirEntry:
def __init__(self, path, name, is_directory=True):
self.name = name
self.path = path
self.is_directory = is_directory
def is_dir(self):
return self.is_directory
@staticmethod
def isdir(path):
        return path == 'mock_path'
def scan_dir(dirs=None):
if dirs:
return [FakeDirEntry(dir_[0], dir_[1]) for dir_ in dirs]
return [FakeDirEntry('mock_path', 'mock_dir'), FakeDirEntry('mock_path2', 'mock_file')]
class TestPrivatePacks:
def test_add_private_packs_to_index(self, mocker):
from Tests.private_build import upload_packs_private
dirs = scan_dir()
mocker.patch('os.scandir', return_value=dirs)
mocker.patch('os.path.isdir', side_effect=FakeDirEntry.isdir)
mocker.patch.object(upload_packs_private, 'update_index_folder')
upload_packs_private.add_private_packs_to_index('test', 'private_test')
index_call_args = upload_packs_private.update_index_folder.call_args[0]
index_call_count = upload_packs_private.update_index_folder.call_count
assert index_call_count == 1
assert index_call_args[0] == 'test'
assert index_call_args[1] == 'mock_dir'
assert index_call_args[2] == 'mock_path'
def test_get_private_packs(self, mocker):
import os
from Tests.Marketplace import marketplace_services
from Tests.private_build import upload_packs_private
mocker.patch('glob.glob', return_value=[os.path.join(marketplace_services.CONTENT_ROOT_PATH,
'Tests', 'Marketplace', 'Tests',
'test_data', 'metadata.json')])
private_packs = upload_packs_private.get_private_packs('path', )
assert private_packs == [{'id': 'ImpossibleTraveler',
'price': 100,
'vendorId': 'vendorId',
'partnerId': 'partnerId',
'partnerName': 'partnerName',
'vendorName': 'vendorName',
'contentCommitHash': "",
}]
def test_get_private_packs_empty(self, mocker):
from Tests.private_build import upload_packs_private
mocker.patch('glob.glob', return_value=[])
mocker.patch("Tests.Marketplace.upload_packs.logging.warning")
private_packs = upload_packs_private.get_private_packs('path')
assert private_packs == []
def test_get_private_packs_error(self, mocker):
from Tests.private_build import upload_packs_private
mocker.patch('glob.glob', side_effect=InterruptedError)
mocker.patch("Tests.Marketplace.upload_packs.logging.warning")
private_packs = upload_packs_private.get_private_packs('path')
assert private_packs == []
|
the-stack_0_8366 | from Script.import_emojis import Emojis
from Script.Commands.Messages.Clash_Of_Clans.get_player import player_info, player_troops
async def reaction_add_change_player_stats_page(self, reaction, member):
if (reaction.emoji in [Emojis["Barbarian_king"], Emojis["Battle_machine"], Emojis["Exp"], Emojis["Troop"]]) and ("Player : " in reaction.message.embeds[0].title):
tag = "#" + reaction.message.embeds[0].title.split("#")[len(reaction.message.embeds[0].title.split("#")) - 1].split("(")[0]
        if reaction.emoji == Emojis["Barbarian_king"]:
            embed = await player_info(member, tag, "main")
        elif reaction.emoji == Emojis["Battle_machine"]:
            embed = await player_info(member, tag, "builder_base")
        elif reaction.emoji == Emojis["Troop"]:
            embed = await player_troops(member, tag)
        elif reaction.emoji == Emojis["Exp"]:
            embed = await player_info(member, tag, "success")
await reaction.message.edit(embed=embed)
await reaction.remove(member)
return
|
the-stack_0_8367 | import numpy as np
import scipy.sparse as sp
class LindbladConstructor:
@staticmethod
    def make_Lindblad_instructions(gamma,O):
        """Return (left, right) instruction pairs for the Lindblad dissipator
        with rate ``gamma`` and collapse operator ``O`` (O must be square).
        """
II = np.eye(O.shape[0])
Od = np.conjugate(O.T)
leftright = gamma * (-np.dot(Od,O)/2)
return [(gamma*O,Od),(leftright,II),(II,leftright)]
@staticmethod
def make_Lindblad_instructions2(gamma,Oket,Obra):
IIket = np.eye(Oket.shape[0])
IIbra = np.eye(Obra.shape[0])
Oketd = np.conjugate(Oket.T)
Obrad = np.conjugate(Obra.T)
left = gamma * (-np.dot(Oketd,Oket)/2)
right = gamma * (-np.dot(Obrad,Obra)/2)
return [(gamma*Oket,Obrad),(left,IIbra),(IIket,right)]
@staticmethod
def make_Lindblad_instructions2_Obra0(gamma,Oket,Obra):
IIbra = np.eye(Obra.shape[0])
Oketd = np.conjugate(Oket.T)
left = gamma * (-np.dot(Oketd,Oket)/2)
return [(left,IIbra)]
@staticmethod
def make_Lindblad_instructions2_Oket0(gamma,Oket,Obra):
IIket = np.eye(Oket.shape[0])
Obrad = np.conjugate(Obra.T)
right = gamma * (-np.dot(Obrad,Obra)/2)
return [(IIket,right)]
class LiouvillianConstructor(LindbladConstructor):
@staticmethod
    def make_commutator_instructions(O):
        """Return (left, right) instruction pairs for the commutator with ``O``
        (O must be square).
        """
II = np.eye(O.shape[0])
return [(O,II),(II,-O)]
@staticmethod
    def make_commutator_instructions2(Oket,Obra):
        """Return commutator-style instruction pairs when the ket-side (``Oket``)
        and bra-side (``Obra``) operators differ.
        """
IIket = np.eye(Oket.shape[0])
IIbra = np.eye(Obra.shape[0])
return [(Oket,IIbra),(IIket,-Obra)]
@staticmethod
def make_Liouvillian(instruction_list,*,sparse=False):
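        """Assemble the Liouvillian matrix by summing kron(left, right.T) over the
        (left, right) pairs in ``instruction_list``; uses scipy.sparse when ``sparse`` is True.
        """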
if sparse:
kron = sp.kron
else:
kron = np.kron
left, right = instruction_list[0]
L = kron(left,right.T)
for left,right in instruction_list[1:]:
L = L + kron(left,right.T)
return L
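if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): build the Liouvillian of a
    # two-level system with an arbitrary, made-up Hamiltonian H and decay operator sigma_minus.
    # The -1j*H factor assumes the usual convention d(rho)/dt = -i[H, rho] + dissipator.
    H = np.array([[0.0, 0.5],
                  [0.5, 1.0]])
    sigma_minus = np.array([[0.0, 1.0],
                            [0.0, 0.0]])
    instructions = LiouvillianConstructor.make_commutator_instructions(-1j * H)
    instructions += LiouvillianConstructor.make_Lindblad_instructions(0.1, sigma_minus)
    L = LiouvillianConstructor.make_Liouvillian(instructions)
    print(L.shape)  # (4, 4): superoperator acting on the vectorized 2x2 density matrix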
|
the-stack_0_8368 | from ufss.UF2 import DensityMatrices
import ufss
import numpy as np
import yaml
import os
import matplotlib.pyplot as plt
from ufss import efieldConvergence
# Fixed parameters
site_energies = [0,1]
site_couplings = [0]
dipoles = [[1,0,0],[1,0,0]]
d = 0
folder = 'UF2_test'
os.makedirs(folder,exist_ok=True)
vibrations = []
# vibrations = [{'displacement':d,'site_label':0,'omega_g':1},
# {'displacement':d,'site_label':1,'omega_g':1.001}]
overdamped_bath = {'cutoff_frequency':1,
'coupling':0.1,
'temperature':0.25,
'cutoff_function':'lorentz-drude',
'spectrum_type':'ohmic'}
bath = {'secular':True,
'site_bath':overdamped_bath,
'vibration_bath':overdamped_bath}
def save_params(trunc_size):
params = {
'site_energies':site_energies,
'site_couplings':site_couplings,
'dipoles':dipoles,
'truncation_size':trunc_size,
'num_eigenvalues':'full',
'eigenvalue_precision':1,
'vibrations':vibrations,
'maximum_manifold':1,
'bath':bath}
with open(os.path.join(folder,'simple_params.yaml'),'w+') as new_file:
yaml.dump(params,new_file)
def run_TA(dt,Delta,*,sigma=1):
TA = DensityMatrices(os.path.join(folder,'open'),detection_type='polarization')
tmax = Delta/2
n = round(tmax/dt)
t = np.arange(-n,n+1/2,1)*dt*sigma
dt = t[1] - t[0]
tmax = t[-1]
ef = ufss.efield_shapes.gaussian(t,sigma)
TA.set_polarization_sequence(['x','x'])
TA.maximum_manifold = 1
TA.set_efields([t,t],[ef,ef],[0,0],[(1,1),(1,0)])
TA.set_t(0.05,dt=1)
T = np.arange(0,52,2)
TA.set_pulse_delays([T])
TA.calculate_signal_all_delays()
return TA
def main():
save_params(1)
ufss.HLG.run(folder)
sigma = 1
dts = np.logspace(-3.2,0,num=20)
Deltas = np.array([4,6,8,10,12,14,20])
f = lambda x,y: run_TA(x,y,sigma=sigma)
c = efieldConvergence(f,dts,Deltas)
print(c.ref_params)
c.run()
print('Minimum M for 1% threshold',c.find_minimum_M(signal_threshold=1E-2))
c.plot()
plt.savefig('efield_convergence_sigma_{:.2f}.png'.format(sigma))
plt.show()
if __name__ == '__main__':
main()
|
the-stack_0_8370 | # Copyright 2021 MosaicML. All Rights Reserved.
from __future__ import annotations
import logging
from typing import Optional, Sequence, Union
import torch
from torch.optim import Optimizer
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
def apply_squeeze_excite(
model: torch.nn.Module,
latent_channels: float = 64,
min_channels: int = 128,
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None,
):
"""Adds Squeeze-and-Excitation blocks (`Hu et al, 2019 <https://arxiv.org/abs/1709.01507>`_) after
:class:`~torch.nn.Conv2d` layers.
A Squeeze-and-Excitation block applies global average pooling to the input,
feeds the resulting vector to a single-hidden-layer fully-connected
network (MLP), and uses the output of this MLP as attention coefficients
to rescale the input. This allows the network to take into account global
information about each input, as opposed to only local receptive fields
like in a convolutional layer.
Args:
model (torch.nn.Module): The module to apply squeeze excite replacement.
latent_channels (float, optional): Dimensionality of the hidden layer within the added
MLP. If less than 1, interpreted as a fraction of the number of
output channels in the :class:`~torch.nn.Conv2d` immediately
preceding each Squeeze-and-Excitation block. Default: ``64``.
        min_channels (int, optional): An SE block is added after a :class:`~torch.nn.Conv2d`
            module ``conv`` only if ``min(conv.in_channels, conv.out_channels) >= min_channels``.
            Default: ``128``.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional):
Existing optimizers bound to ``model.parameters()``. All optimizers that have already been
constructed with ``model.parameters()`` must be specified here so
they will optimize the correct parameters.
If the optimizer(s) are constructed *after* calling this function,
then it is safe to omit this parameter. These optimizers will see the correct
model parameters.
Returns:
The modified model
Example:
.. testcode::
import composer.functional as cf
from torchvision import models
model = models.resnet50()
            cf.apply_squeeze_excite(model)
"""
def convert_module(module: torch.nn.Module, module_index: int):
assert isinstance(module, torch.nn.Conv2d), "should only be called with conv2d"
if min(module.in_channels, module.out_channels) < min_channels:
return None
return SqueezeExciteConv2d.from_conv2d(module, module_index, latent_channels=latent_channels)
module_surgery.replace_module_classes(model, optimizers=optimizers, policies={torch.nn.Conv2d: convert_module})
return model
class SqueezeExcite2d(torch.nn.Module):
"""Squeeze-and-Excitation block from (`Hu et al, 2019 <https://arxiv.org/abs/1709.01507>`_)
This block applies global average pooling to the input, feeds the resulting
vector to a single-hidden-layer fully-connected network (MLP), and uses the
output of this MLP as attention coefficients to rescale the input. This
allows the network to take into account global information about each input,
as opposed to only local receptive fields like in a convolutional layer.
Args:
num_features (int): Number of features or channels in the input
latent_channels (float, optional): Dimensionality of the hidden layer within the added
MLP. If less than 1, interpreted as a fraction of ``num_features``. Default: ``0.125``.
"""
def __init__(self, num_features: int, latent_channels: float = .125):
super().__init__()
self.latent_channels = int(latent_channels if latent_channels >= 1 else latent_channels * num_features)
flattened_dims = num_features
self.pool_and_mlp = torch.nn.Sequential(torch.nn.AdaptiveAvgPool2d(1), torch.nn.Flatten(),
torch.nn.Linear(flattened_dims, self.latent_channels, bias=False),
torch.nn.ReLU(),
torch.nn.Linear(self.latent_channels, num_features, bias=False),
torch.nn.Sigmoid())
def forward(self, input: torch.Tensor) -> torch.Tensor:
n, c, _, _ = input.shape
attention_coeffs = self.pool_and_mlp(input)
return input * attention_coeffs.reshape(n, c, 1, 1)
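# Illustrative shape check: the block is shape-preserving.
#   se = SqueezeExcite2d(num_features=64)    # latent_channels defaults to int(0.125 * 64) = 8
#   se(torch.randn(8, 64, 32, 32)).shape     # -> torch.Size([8, 64, 32, 32])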
class SqueezeExciteConv2d(torch.nn.Module):
"""Helper class used to add a :class:`SqueezeExcite2d` module after a :class:`~torch.nn.Conv2d`."""
def __init__(self, *args, latent_channels: float = 0.125, conv: Optional[torch.nn.Conv2d] = None, **kwargs):
super().__init__()
self.conv = torch.nn.Conv2d(*args, **kwargs) if conv is None else conv
self.se = SqueezeExcite2d(num_features=self.conv.out_channels, latent_channels=latent_channels)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return self.se(self.conv(input))
@staticmethod
def from_conv2d(module: torch.nn.Conv2d, module_index: int, latent_channels: float):
return SqueezeExciteConv2d(conv=module, latent_channels=latent_channels)
class SqueezeExcite(Algorithm):
"""Adds Squeeze-and-Excitation blocks (`Hu et al, 2019 <https://arxiv.org/abs/1709.01507>`_) after the
:class:`~torch.nn.Conv2d` modules in a neural network.
Runs on :attr:`~composer.core.event.Event.INIT`. See :class:`SqueezeExcite2d` for more information.
Args:
latent_channels (float, optional): Dimensionality of the hidden layer within the added
MLP. If less than 1, interpreted as a fraction of the number of
output channels in the :class:`~torch.nn.Conv2d` immediately
preceding each Squeeze-and-Excitation block. Default: ``64``.
min_channels (int, optional): An SE block is added after a :class:`~torch.nn.Conv2d`
module ``conv`` only if
``min(conv.in_channels, conv.out_channels) >= min_channels``.
For models that reduce spatial size and increase channel count
deeper in the network, this parameter can be used to only
add SE blocks deeper in the network. This may be desirable
because SE blocks add less overhead when their inputs have
smaller spatial size. Default: ``128``.
"""
def __init__(
self,
latent_channels: float = 64,
min_channels: int = 128,
):
self.latent_channels = latent_channels
self.min_channels = min_channels
def match(self, event: Event, state: State) -> bool:
"""Runs on :attr:`~composer.core.event.Event.INIT`
Args:
event (Event): The current event.
state (State): The current state.
Returns:
            bool: True if this algorithm should run now.
"""
return event == Event.INIT
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
"""Apply the Squeeze-and-Excitation layer replacement.
Args:
event (Event): the current event
state (State): the current trainer state
logger (Logger): the training logger
"""
state.model = apply_squeeze_excite(state.model,
optimizers=state.optimizers,
latent_channels=self.latent_channels,
min_channels=self.min_channels)
layer_count = module_surgery.count_module_instances(state.model, SqueezeExciteConv2d)
log.info(f'Applied SqueezeExcite to model {state.model.__class__.__name__} '
f'with latent_channels={self.latent_channels}, '
f'min_channels={self.min_channels}. '
f'Model now has {layer_count} SqueezeExcite layers.')
logger.data_fit({
'squeeze_excite/num_squeeze_excite_layers': layer_count,
})
|
the-stack_0_8371 | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'gram_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
the-stack_0_8372 | """test_2_2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "TEST 2"
admin.site.site_title = "TEST 2 Admin Portal"
admin.site.index_title = "TEST 2 Admin"
# swagger
api_info = openapi.Info(
title="TEST 2 API",
default_version="v1",
description="API documentation for TEST 2 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
the-stack_0_8373 | # -*- coding: utf-8 -*-
import os
import sqlite3
import configparser
class FirefoxSessionCookieAuth:
'''Uses a Firefox session for authentication.'''
token_name = 'seraph.confluence'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__obtain_token_key_from_firefox()
def __call__(self, request):
request.prepare_cookies({self.token_name: self.__token_key})
return request
def __obtain_token_key_from_firefox(self):
profiles_ini_path = os.path.expanduser(
'~/.mozilla/firefox/profiles.ini')
default_profile_path = self.__find_default_profile_path(
ini_path=profiles_ini_path)
cookie_file_path = os.path.join(default_profile_path, 'cookies.sqlite')
self.__token_key = self.__retrieve_token_key(
db_path=cookie_file_path,
name=self.token_name)
@staticmethod
def __find_default_profile_path(ini_path):
        parser = configparser.ConfigParser()  # SafeConfigParser is a deprecated alias
parser.read(ini_path)
for section in parser.sections():
if parser.has_option(section, 'Default'):
try:
path = parser.get(section, 'Path')
if parser.getboolean(section, 'IsRelative'):
path = os.path.join(os.path.dirname(ini_path), path)
return os.path.abspath(os.path.expanduser(path))
except configparser.NoOptionError:
pass
@staticmethod
def __retrieve_token_key(db_path, name):
'''Retrieve key from the Firefox cookie database.'''
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
rows = cursor.execute(
'SELECT value FROM moz_cookies WHERE name == "{name}"'.format(
name=name))
value = next(rows)[0]
return value
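if __name__ == '__main__':
    # Minimal usage sketch. Assumes this class is meant to act as a `requests` auth
    # callable (its __call__ receives a prepared request); the URL below is a placeholder.
    import requests
    resp = requests.get(
        'https://confluence.example.com/rest/api/content',
        auth=FirefoxSessionCookieAuth(),
    )
    print(resp.status_code)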
|
the-stack_0_8374 | import collections
import hashlib
import hmac
import jsonpatch
import os
import re
import time
from base64 import b64decode, b64encode
from binascii import hexlify
from urllib.parse import unquote
from enum import Enum
import ujson as json
try:
import sqlalchemy
except ImportError: # pragma: no cover
sqlalchemy = None
try:
import memcache
except ImportError: # pragma: no cover
memcache = None
from pyramid import httpexceptions
from pyramid.interfaces import IRoutesMapper
from pyramid.request import Request, apply_request_extensions
from pyramid.security import Authenticated
from pyramid.settings import aslist
from pyramid.view import render_view_to_response
from cornice import cors
from colander import null
def json_serializer(v, **kw):
return json.dumps(v, escape_forward_slashes=False)
def strip_whitespace(v):
"""Remove whitespace, newlines, and tabs from the beginning/end
of a string.
:param str v: the string to strip.
:rtype: str
"""
return v.strip(" \t\n\r") if v is not null else v
def msec_time():
"""Return current epoch time in milliseconds.
:rtype: int
"""
return int(time.time() * 1000.0) # floor
def classname(obj):
"""Get a classname from an object.
:rtype: str
"""
return obj.__class__.__name__.lower()
def merge_dicts(a, b):
"""Merge b into a recursively, without overwriting values.
:param dict a: the dict that will be altered with values of `b`.
"""
for k, v in b.items():
if isinstance(v, dict):
merge_dicts(a.setdefault(k, {}), v)
else:
a.setdefault(k, v)
def recursive_update_dict(root, changes, ignores=()):
    """Recursively update ``root`` (in place) with all entries of ``changes`` and its nested dicts.
    :param dict root: root dictionary, modified in place.
    :param dict changes: dictionary of changes to apply.
    :param tuple ignores: values whose keys should be removed from ``root`` rather than set.
"""
if isinstance(changes, dict):
for k, v in changes.items():
if isinstance(v, dict):
if k not in root:
root[k] = {}
recursive_update_dict(root[k], v, ignores)
elif v in ignores:
if k in root:
root.pop(k)
else:
root[k] = v
def random_bytes_hex(bytes_length):
"""Return a hexstring of bytes_length cryptographic-friendly random bytes.
:param int bytes_length: number of random bytes.
:rtype: str
"""
return hexlify(os.urandom(bytes_length)).decode("utf-8")
def native_value(value):
"""Convert string value to native python values.
    :param str value: value to interpret.
:returns: the value coerced to python type
"""
if isinstance(value, str):
try:
value = json.loads(value)
except ValueError:
return value
return value
def read_env(key, value):
"""Read the setting key from environment variables.
:param key: the setting name
:param value: default value if undefined in environment
:returns: the value from environment, coerced to python type
"""
envkey = key.replace(".", "_").replace("-", "_").upper()
return native_value(os.getenv(envkey, value))
def encode64(content, encoding="utf-8"):
"""Encode some content in base64.
:rtype: str
"""
return b64encode(content.encode(encoding)).decode(encoding)
def decode64(encoded_content, encoding="utf-8"):
"""Decode some base64 encoded content.
:rtype: str
"""
return b64decode(encoded_content.encode(encoding)).decode(encoding)
def hmac_digest(secret, message, encoding="utf-8"):
"""Return hex digest of a message HMAC using secret"""
if isinstance(secret, str):
secret = secret.encode(encoding)
return hmac.new(secret, message.encode(encoding), hashlib.sha256).hexdigest()
def dict_subset(d, keys):
"""Return a dict with the specified keys"""
result = {}
for key in keys:
if "." in key:
field, subfield = key.split(".", 1)
            if isinstance(d.get(field), collections.abc.Mapping):
subvalue = dict_subset(d[field], [subfield])
result[field] = dict_merge(subvalue, result.get(field, {}))
elif field in d:
result[field] = d[field]
else:
if key in d:
result[key] = d[key]
return result
def dict_merge(a, b):
"""Merge the two specified dicts"""
result = dict(**b)
for key, value in a.items():
        if isinstance(value, collections.abc.Mapping):
value = dict_merge(value, result.setdefault(key, {}))
result[key] = value
return result
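# For illustration: values from ``a`` win on conflicts and nested dicts are merged, e.g.
#   dict_merge({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3}) -> {"a": {"x": 1, "y": 2}, "b": 3}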
def find_nested_value(d, path, default=None):
"""Finds a nested value in a dict from a dotted path key string.
:param dict d: the dict to retrieve nested value from
:param str path: the path to the nested value, in dot notation
    :returns: the nested value if any was found, or ``default``
"""
if path in d:
return d.get(path)
# the challenge is to identify what is the root key, as dict keys may
# contain dot characters themselves
parts = path.split(".")
# build a list of all possible root keys from all the path parts
candidates = [".".join(parts[: i + 1]) for i in range(len(parts))]
# we start with the longest candidate paths as they're most likely to be the
# ones we want if they match
root = next((key for key in reversed(candidates) if key in d), None)
# if no valid root candidates were found, the path is invalid; abandon
if root is None or not isinstance(d.get(root), dict):
return default
# we have our root key, extract the new subpath and recur
subpath = path.replace(root + ".", "", 1)
return find_nested_value(d.get(root), subpath, default=default)
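# For illustration, keys may themselves contain dots:
#   find_nested_value({"a.b": {"c": 1}}, "a.b.c")      -> 1
#   find_nested_value({"a": {"b": 2}}, "a.b")          -> 2
#   find_nested_value({"a": 1}, "a.b", default=0)      -> 0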
class COMPARISON(Enum):
LT = "<"
MIN = ">="
MAX = "<="
NOT = "!="
EQ = "=="
GT = ">"
IN = "in"
EXCLUDE = "exclude"
LIKE = "like"
HAS = "has"
# The order matters here because we want to match
# contains_any before contains_
CONTAINS_ANY = "contains_any"
CONTAINS = "contains"
def reapply_cors(request, response):
"""Reapply cors headers to the new response with regards to the request.
We need to re-apply the CORS checks done by Cornice, in case we're
recreating the response from scratch.
"""
service = request.current_service
if service:
request.info["cors_checked"] = False
cors.apply_cors_post_request(service, request, response)
response = cors.ensure_origin(service, request, response)
else:
# No existing service is concerned, and Cornice is not implied.
origin = request.headers.get("Origin")
if origin:
settings = request.registry.settings
allowed_origins = set(aslist(settings["cors_origins"]))
required_origins = {"*", origin}
if allowed_origins.intersection(required_origins):
response.headers["Access-Control-Allow-Origin"] = origin
# Import service here because kinto.core import utils
from kinto.core import Service
if Service.default_cors_headers: # pragma: no branch
headers = ",".join(Service.default_cors_headers)
response.headers["Access-Control-Expose-Headers"] = headers
return response
def log_context(request, **kwargs):
"""Bind information to the current request summary log.
"""
non_empty = {k: v for k, v in kwargs.items() if v is not None}
try:
request._log_context.update(**non_empty)
except AttributeError:
request._log_context = non_empty
return request._log_context
def current_service(request):
"""Return the Cornice service matching the specified request.
:returns: the service or None if unmatched.
:rtype: cornice.Service
"""
if request.matched_route:
services = request.registry.cornice_services
pattern = request.matched_route.pattern
try:
service = services[pattern]
except KeyError:
return None
else:
return service
def current_resource_name(request):
"""Return the name used when the kinto.core resource was registered along its
viewset.
:returns: the resource identifier.
:rtype: str
"""
service = current_service(request)
resource_name = service.viewset.get_name(service.resource)
return resource_name
def prefixed_userid(request):
"""In Kinto users ids are prefixed with the policy name that is
contained in Pyramid Multiauth.
If a custom authn policy is used, without authn_type, this method returns
the user id without prefix.
"""
# If pyramid_multiauth is used, a ``authn_type`` is set on request
    # when a policy successfully authenticates a user.
# (see :func:`kinto.core.initialization.setup_authentication`)
authn_type = getattr(request, "authn_type", None)
if authn_type is not None:
return "{}:{}".format(authn_type, request.selected_userid)
def prefixed_principals(request):
"""
:returns: the list principals with prefixed user id.
"""
principals = request.effective_principals
if Authenticated not in principals:
return principals
# Remove unprefixed user id on effective_principals to avoid conflicts.
# (it is added via Pyramid Authn policy effective principals)
prefix, userid = request.prefixed_userid.split(":", 1)
principals = [p for p in principals if p != userid]
if request.prefixed_userid not in principals:
principals = [request.prefixed_userid] + principals
return principals
def build_request(original, dict_obj):
"""
Transform a dict object into a :class:`pyramid.request.Request` object.
It sets a ``parent`` attribute on the resulting request assigned with
the `original` request specified.
:param original: the original request.
:param dict_obj: a dict object with the sub-request specifications.
"""
api_prefix = "/{}".format(original.upath_info.split("/")[1])
path = dict_obj["path"]
if not path.startswith(api_prefix):
path = api_prefix + path
path = path.encode("utf-8")
method = dict_obj.get("method") or "GET"
headers = dict(original.headers)
headers.update(**dict_obj.get("headers") or {})
# Body can have different length, do not use original header.
headers.pop("Content-Length", None)
payload = dict_obj.get("body") or ""
# Payload is always a dict (from ``BatchRequestSchema.body``).
# Send it as JSON for subrequests.
if isinstance(payload, dict):
headers["Content-Type"] = "application/json; charset=utf-8"
payload = json.dumps(payload)
request = Request.blank(
path=path.decode("latin-1"), headers=headers, POST=payload, method=method
)
request.registry = original.registry
apply_request_extensions(request)
# This is used to distinguish subrequests from direct incoming requests.
# See :func:`kinto.core.initialization.setup_logging()`
request.parent = original
return request
def build_response(response, request):
"""
Transform a :class:`pyramid.response.Response` object into a serializable
dict.
:param response: a response object, returned by Pyramid.
:param request: the request that was used to get the response.
"""
dict_obj = {}
dict_obj["path"] = unquote(request.path)
dict_obj["status"] = response.status_code
dict_obj["headers"] = dict(response.headers)
body = ""
if request.method != "HEAD":
# XXX : Pyramid should not have built response body for HEAD!
try:
body = response.json
except ValueError:
body = response.body
dict_obj["body"] = body
return dict_obj
def follow_subrequest(request, subrequest, **kwargs):
"""Run a subrequest (e.g. batch), and follow the redirection if any.
:rtype: tuple
    :returns: the response and the redirection request (or `subrequest`
if no redirection happened.)
"""
try:
try:
return request.invoke_subrequest(subrequest, **kwargs), subrequest
except Exception as e:
resp = render_view_to_response(e, subrequest)
if not resp or resp.status_code >= 500:
raise e
raise resp
except httpexceptions.HTTPRedirection as e:
new_location = e.headers["Location"]
new_request = Request.blank(
path=new_location,
headers=subrequest.headers,
POST=subrequest.body,
method=subrequest.method,
)
new_request.bound_data = subrequest.bound_data
new_request.parent = getattr(subrequest, "parent", None)
return request.invoke_subrequest(new_request, **kwargs), new_request
def strip_uri_prefix(path):
"""
Remove potential version prefix in URI.
"""
return re.sub(r"^(/v\d+)?", "", str(path))
def view_lookup(request, uri):
"""
A convenience method for view_lookup_registry when you have a request.
:param request: the current request (used to obtain registry).
:param uri: a plural or object endpoint URI.
:rtype: tuple
:returns: the resource name and the associated matchdict.
"""
return view_lookup_registry(request.registry, uri)
def view_lookup_registry(registry, uri):
"""
Look-up the specified `uri` and return the associated resource name
along the match dict.
:param registry: the application's registry.
:param uri: a plural or object endpoint URI.
:rtype: tuple
:returns: the resource name and the associated matchdict.
"""
api_prefix = "/{}".format(registry.route_prefix)
path = api_prefix + uri
q = registry.queryUtility
routes_mapper = q(IRoutesMapper)
fakerequest = Request.blank(path=path)
info = routes_mapper(fakerequest)
matchdict, route = info["match"], info["route"]
if route is None:
raise ValueError("URI has no route")
resource_name = route.name.replace("-record", "").replace("-collection", "")
return resource_name, matchdict
def instance_uri(request, resource_name, **params):
"""Return the URI for the given resource."""
return strip_uri_prefix(request.route_path("{}-record".format(resource_name), **params))
def instance_uri_registry(registry, resource_name, **params):
"""Return the URI for the given resource, even if you don't have a request.
This gins up a request using Request.blank and so does not support
any routes with pregenerators.
"""
request = Request.blank(path="")
request.registry = registry
return instance_uri(request, resource_name, **params)
def parse_resource(resource):
"""Extract the bucket_id and collection_id of the given resource (URI)
:param str resource: a uri formatted /buckets/<bid>/collections/<cid> or <bid>/<cid>.
:returns: a dictionary with the bucket_id and collection_id of the resource
"""
    error_msg = (
        "Resources should be defined as "
        "'/buckets/<bid>/collections/<cid>' or '<bid>/<cid>' "
        "with valid collection and bucket ids."
    )
from kinto.views import NameGenerator
id_generator = NameGenerator()
parts = resource.split("/")
if len(parts) == 2:
bucket, collection = parts
elif len(parts) == 5:
_, _, bucket, _, collection = parts
else:
raise ValueError(error_msg)
if bucket == "" or collection == "":
raise ValueError(error_msg)
if not id_generator.match(bucket) or not id_generator.match(collection):
raise ValueError(error_msg)
return {"bucket": bucket, "collection": collection}
def apply_json_patch(record, ops):
"""
Apply JSON Patch operations using jsonpatch.
:param record: base record where changes should be applied (not in-place).
    :param list ops: list of JSON Patch operations.
    :returns: dict with the patched ``data`` and ``permissions`` of the resource.
"""
data = {**record}
# Permissions should always have read and write fields defined (to allow add)
permissions = {"read": set(), "write": set()}
# Get permissions if available on the resource (using SharableResource)
permissions.update(data.pop("__permissions__", {}))
# Permissions should be mapped as a dict, since jsonpatch doesn't accept
# sets and lists are mapped as JSON arrays (not indexed by value)
permissions = {k: {i: i for i in v} for k, v in permissions.items()}
resource = {"data": data, "permissions": permissions}
# Allow patch permissions without value since key and value are equal on sets
for op in ops:
# 'path' is here since it was validated.
if op["path"].startswith(("/permissions/read/", "/permissions/write/")):
op["value"] = op["path"].split("/")[-1]
try:
result = jsonpatch.apply_patch(resource, ops)
except (jsonpatch.JsonPatchException, jsonpatch.JsonPointerException) as e:
raise ValueError(e)
return result
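# For illustration:
#   apply_json_patch({"id": "abc", "title": "old"},
#                    [{"op": "replace", "path": "/data/title", "value": "new"},
#                     {"op": "add", "path": "/permissions/read/alice"}])
#   -> {"data": {"id": "abc", "title": "new"},
#       "permissions": {"read": {"alice": "alice"}, "write": {}}}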
|
the-stack_0_8376 |
def from_dynamodb_raw(item):
result = {}
for key in item:
value = item[key]
if 'S' in value:
result[key] = value['S']
elif 'N' in value:
result[key] = value['N']
else:
raise Exception('unmapped kind {}'.format(value))
return result
def to_dynamodb_raw(item):
result = {}
    wrapped_dict = item.__dict__ if hasattr(item, '__dict__') else item
for key in wrapped_dict:
value = wrapped_dict[key]
if type(value) is str:
result[key] = { 'S': value }
elif type(value) is int or type(value) is float:
result[key] = { 'N': value }
elif value is None:
pass
else:
raise Exception('unmapped kind {}'.format(type(value)))
return result
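if __name__ == '__main__':
    # Round-trip sketch with a made-up item class; note that numbers keep the raw 'N' value.
    class _Fruit:
        def __init__(self, name, quantity):
            self.name = name
            self.quantity = quantity
    raw = to_dynamodb_raw(_Fruit('apple', 3))
    print(raw)                     # {'name': {'S': 'apple'}, 'quantity': {'N': 3}}
    print(from_dynamodb_raw(raw))  # {'name': 'apple', 'quantity': 3}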
|
the-stack_0_8379 | import gc
import os
import math
import random
import warnings
import albumentations as A
import colorednoise as cn
import cv2
import librosa
import numpy as np
import pandas as pd
import soundfile as sf
import timm
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as torchdata
from pathlib import Path
from typing import List
from albumentations.pytorch import ToTensorV2
from albumentations.core.transforms_interface import ImageOnlyTransform
from catalyst.core import Callback, CallbackOrder, IRunner
from catalyst.dl import Runner, SupervisedRunner
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
from sklearn import model_selection
from sklearn import metrics
from timm.models.layers import SelectAdaptivePool2d
from torch.optim.optimizer import Optimizer
from torchlibrosa.stft import LogmelFilterBank, Spectrogram
from torchlibrosa.augmentation import SpecAugmentation
from tqdm import tqdm
# =================================================
# Config #
# =================================================
class CFG:
######################
# Globals #
######################
seed = 1213
epochs = 55
train = True
folds = [0]
img_size = 224
main_metric = "epoch_f1_at_05"
minimize_metric = False
######################
# Data #
######################
train_datadir = Path("../input/birdclef-2021/train_short_audio")
train_csv = "../input/birdclef-2021/train_metadata.csv"
train_soundscape = "../input/birdclef-2021/train_soundscape_labels.csv"
birdcall_datadir = Path(
"../input/birdsong-recognition/train_audio_resampled")
birdcall_csv = "../input/birdsong-recognition/train_resampled_extended.csv"
######################
# Dataset #
######################
transforms = {
"train": [{"name": "Normalize"}, {"name": "PinkNoise", "params": {"min_snr": 5}}],
"valid": [{"name": "Normalize"}]
}
period = 20
n_mels = 224
fmin = 300
fmax = 16000
n_fft = 2048
hop_length = 768
sample_rate = 32000
melspectrogram_parameters = {
"n_mels": 224,
"fmin": 20,
"fmax": 16000
}
target_columns = [
'acafly', 'acowoo', 'aldfly', 'ameavo', 'amecro',
'amegfi', 'amekes', 'amepip', 'amered', 'amerob',
'amewig', 'amtspa', 'andsol1', 'annhum', 'astfly',
'azaspi1', 'babwar', 'baleag', 'balori', 'banana',
'banswa', 'banwre1', 'barant1', 'barswa', 'batpig1',
'bawswa1', 'bawwar', 'baywre1', 'bbwduc', 'bcnher',
'belkin1', 'belvir', 'bewwre', 'bkbmag1', 'bkbplo',
'bkbwar', 'bkcchi', 'bkhgro', 'bkmtou1', 'bknsti', 'blbgra1',
'blbthr1', 'blcjay1', 'blctan1', 'blhpar1', 'blkpho',
'blsspa1', 'blugrb1', 'blujay', 'bncfly', 'bnhcow', 'bobfly1',
'bongul', 'botgra', 'brbmot1', 'brbsol1', 'brcvir1', 'brebla',
'brncre', 'brnjay', 'brnthr', 'brratt1', 'brwhaw', 'brwpar1',
'btbwar', 'btnwar', 'btywar', 'bucmot2', 'buggna', 'bugtan',
'buhvir', 'bulori', 'burwar1', 'bushti', 'butsal1', 'buwtea',
'cacgoo1', 'cacwre', 'calqua', 'caltow', 'cangoo', 'canwar',
'carchi', 'carwre', 'casfin', 'caskin', 'caster1', 'casvir',
'categr', 'ccbfin', 'cedwax', 'chbant1', 'chbchi', 'chbwre1',
'chcant2', 'chispa', 'chswar', 'cinfly2', 'clanut', 'clcrob',
'cliswa', 'cobtan1', 'cocwoo1', 'cogdov', 'colcha1', 'coltro1',
'comgol', 'comgra', 'comloo', 'commer', 'compau', 'compot1',
'comrav', 'comyel', 'coohaw', 'cotfly1', 'cowscj1', 'cregua1',
'creoro1', 'crfpar', 'cubthr', 'daejun', 'dowwoo', 'ducfly', 'dusfly',
'easblu', 'easkin', 'easmea', 'easpho', 'eastow', 'eawpew', 'eletro',
'eucdov', 'eursta', 'fepowl', 'fiespa', 'flrtan1', 'foxspa', 'gadwal',
'gamqua', 'gartro1', 'gbbgul', 'gbwwre1', 'gcrwar', 'gilwoo',
'gnttow', 'gnwtea', 'gocfly1', 'gockin', 'gocspa', 'goftyr1',
'gohque1', 'goowoo1', 'grasal1', 'grbani', 'grbher3', 'grcfly',
'greegr', 'grekis', 'grepew', 'grethr1', 'gretin1', 'greyel',
'grhcha1', 'grhowl', 'grnher', 'grnjay', 'grtgra', 'grycat',
'gryhaw2', 'gwfgoo', 'haiwoo', 'heptan', 'hergul', 'herthr',
'herwar', 'higmot1', 'hofwoo1', 'houfin', 'houspa', 'houwre',
'hutvir', 'incdov', 'indbun', 'kebtou1', 'killde', 'labwoo', 'larspa',
'laufal1', 'laugul', 'lazbun', 'leafly', 'leasan', 'lesgol', 'lesgre1',
'lesvio1', 'linspa', 'linwoo1', 'littin1', 'lobdow', 'lobgna5', 'logshr',
'lotduc', 'lotman1', 'lucwar', 'macwar', 'magwar', 'mallar3', 'marwre',
'mastro1', 'meapar', 'melbla1', 'monoro1', 'mouchi', 'moudov', 'mouela1',
'mouqua', 'mouwar', 'mutswa', 'naswar', 'norcar', 'norfli', 'normoc', 'norpar',
'norsho', 'norwat', 'nrwswa', 'nutwoo', 'oaktit', 'obnthr1', 'ocbfly1',
'oliwoo1', 'olsfly', 'orbeup1', 'orbspa1', 'orcpar', 'orcwar', 'orfpar',
'osprey', 'ovenbi1', 'pabspi1', 'paltan1', 'palwar', 'pasfly', 'pavpig2',
'phivir', 'pibgre', 'pilwoo', 'pinsis', 'pirfly1', 'plawre1', 'plaxen1',
'plsvir', 'plupig2', 'prowar', 'purfin', 'purgal2', 'putfru1', 'pygnut',
'rawwre1', 'rcatan1', 'rebnut', 'rebsap', 'rebwoo', 'redcro', 'reevir1',
'rehbar1', 'relpar', 'reshaw', 'rethaw', 'rewbla', 'ribgul', 'rinkin1',
'roahaw', 'robgro', 'rocpig', 'rotbec', 'royter1', 'rthhum', 'rtlhum',
'ruboro1', 'rubpep1', 'rubrob', 'rubwre1', 'ruckin', 'rucspa1', 'rucwar',
'rucwar1', 'rudpig', 'rudtur', 'rufhum', 'rugdov', 'rumfly1', 'runwre1',
'rutjac1', 'saffin', 'sancra', 'sander', 'savspa', 'saypho', 'scamac1',
'scatan', 'scbwre1', 'scptyr1', 'scrtan1', 'semplo', 'shicow', 'sibtan2',
'sinwre1', 'sltred', 'smbani', 'snogoo', 'sobtyr1', 'socfly1', 'solsan',
'sonspa', 'soulap1', 'sposan', 'spotow', 'spvear1', 'squcuc1', 'stbori',
'stejay', 'sthant1', 'sthwoo1', 'strcuc1', 'strfly1', 'strsal1', 'stvhum2',
'subfly', 'sumtan', 'swaspa', 'swathr', 'tenwar', 'thbeup1', 'thbkin',
'thswar1', 'towsol', 'treswa', 'trogna1', 'trokin', 'tromoc', 'tropar',
'tropew1', 'tuftit', 'tunswa', 'veery', 'verdin', 'vigswa', 'warvir',
'wbwwre1', 'webwoo1', 'wegspa1', 'wesant1', 'wesblu', 'weskin', 'wesmea',
'westan', 'wewpew', 'whbman1', 'whbnut', 'whcpar', 'whcsee1', 'whcspa',
'whevir', 'whfpar1', 'whimbr', 'whiwre1', 'whtdov', 'whtspa', 'whwbec1',
'whwdov', 'wilfly', 'willet1', 'wilsni1', 'wiltur', 'wlswar', 'wooduc',
'woothr', 'wrenti', 'y00475', 'yebcha', 'yebela1', 'yebfly', 'yebori1',
'yebsap', 'yebsee1', 'yefgra1', 'yegvir', 'yehbla', 'yehcar1', 'yelgro',
'yelwar', 'yeofly1', 'yerwar', 'yeteup1', 'yetvir']
######################
# Loaders #
######################
loader_params = {
"train": {
"batch_size": 64,
"num_workers": 20,
"shuffle": True
},
"valid": {
"batch_size": 64,
"num_workers": 20,
"shuffle": False
},
"test": {
"batch_size": 64,
"num_workers": 20,
"shuffle": False
}
}
######################
# Split #
######################
split = "StratifiedKFold"
split_params = {
"n_splits": 5,
"shuffle": True,
"random_state": 1213
}
######################
# Model #
######################
base_model_name = "tf_efficientnet_b0_ns"
pooling = "max"
pretrained = True
num_classes = 397
in_channels = 1
######################
# Criterion #
######################
loss_name = "BCEFocal2WayLoss"
loss_params: dict = {}
######################
# Optimizer #
######################
optimizer_name = "Adam"
optimizer_params = {
"lr": 0.001
}
# For SAM optimizer
base_optimizer = "Adam"
######################
# Scheduler #
######################
scheduler_name = "CosineAnnealingLR"
scheduler_params = {
"T_max": 10
}
# =================================================
# Utilities #
# =================================================
def set_seed(seed=42):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_device() -> torch.device:
return torch.device("cuda" if torch.cuda.is_available() else "cpu")
def init_logger(log_file='train.log'):
from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
logger = getLogger(__name__)
logger.setLevel(INFO)
handler1 = StreamHandler()
handler1.setFormatter(Formatter("%(message)s"))
handler2 = FileHandler(filename=log_file)
handler2.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
def prepare_model_for_inference(model, path: Path):
if not torch.cuda.is_available():
ckpt = torch.load(path, map_location="cpu")
else:
ckpt = torch.load(path)
model.load_state_dict(ckpt["model_state_dict"])
model.eval()
return model
# =================================================
# Split #
# =================================================
def get_split():
if hasattr(model_selection, CFG.split):
return model_selection.__getattribute__(CFG.split)(**CFG.split_params)
else:
return MultilabelStratifiedKFold(**CFG.split_params)
# =================================================
# Dataset #
# =================================================
def normalize_melspec(X: np.ndarray):
eps = 1e-6
mean = X.mean()
X = X - mean
std = X.std()
Xstd = X / (std + eps)
norm_min, norm_max = Xstd.min(), Xstd.max()
if (norm_max - norm_min) > eps:
V = Xstd
V[V < norm_min] = norm_min
V[V > norm_max] = norm_max
V = 255 * (V - norm_min) / (norm_max - norm_min)
V = V.astype(np.uint8)
else:
# Just zero
V = np.zeros_like(Xstd, dtype=np.uint8)
return V
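# normalize_melspec() standardises the spectrogram and rescales it to a uint8
# image in [0, 255], which is what the cv2-based image pipeline below expects.
# Rough usage sketch (shapes are illustrative, not taken from the data):
#
#   >>> X = librosa.power_to_db(np.random.rand(224, 400))  # (n_mels, frames)
#   >>> V = normalize_melspec(X)
#   >>> V.dtype, int(V.min()) >= 0, int(V.max()) <= 255
#   (dtype('uint8'), True, True)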
class WaveformDataset(torchdata.Dataset):
def __init__(self,
df: pd.DataFrame,
img_size=224,
waveform_transforms=None,
period=20,
validation=False):
self.df = df
self.img_size = img_size
self.waveform_transforms = waveform_transforms
self.period = period
self.validation = validation
def __len__(self):
return len(self.df)
def __getitem__(self, idx: int):
sample = self.df.loc[idx, :]
datadir = Path(sample["datadir"])
wav_name = sample["filename"]
ebird_code = sample["primary_label"]
secondary_labels = eval(sample["secondary_labels"])
y, sr = sf.read(datadir / ebird_code / wav_name)
len_y = len(y)
effective_length = sr * self.period
if len_y < effective_length:
new_y = np.zeros(effective_length, dtype=y.dtype)
if not self.validation:
start = np.random.randint(effective_length - len_y)
else:
start = 0
new_y[start:start + len_y] = y
y = new_y.astype(np.float32)
elif len_y > effective_length:
if not self.validation:
start = np.random.randint(len_y - effective_length)
else:
start = 0
y = y[start:start + effective_length].astype(np.float32)
else:
y = y.astype(np.float32)
y = np.nan_to_num(y)
if np.isnan(y).any():
y = np.zeros(len(y))
if self.waveform_transforms:
y = self.waveform_transforms(y)
y = np.nan_to_num(y)
labels = np.zeros(len(CFG.target_columns), dtype=float)
labels[CFG.target_columns.index(ebird_code)] = 1.0
mask = np.ones(len(CFG.target_columns), dtype=float)
for secondary_label in secondary_labels:
if secondary_label in CFG.target_columns:
mask[CFG.target_columns.index(secondary_label)] = 0.0
return {
"image": y,
"targets": labels,
"mask": mask
}
class SingleChannelDataset(torchdata.Dataset):
def __init__(self,
df: pd.DataFrame,
datadir: Path,
img_size=224,
waveform_transforms=None,
spectrogram_transforms=None,
melspectrogram_parameters={},
period=20,
validation=False):
self.df = df
self.datadir = datadir
self.img_size = img_size
self.waveform_transforms = waveform_transforms
self.spectrogram_transforms = spectrogram_transforms
self.melspectrogram_parameters = melspectrogram_parameters
self.period = period
self.validation = validation
def __len__(self):
return len(self.df)
def __getitem__(self, idx: int):
sample = self.df.loc[idx, :]
wav_name = sample["filename"]
ebird_code = sample["primary_label"]
y, sr = sf.read(self.datadir / ebird_code / wav_name)
len_y = len(y)
effective_length = sr * self.period
if len_y < effective_length:
new_y = np.zeros(effective_length, dtype=y.dtype)
if not self.validation:
start = np.random.randint(effective_length - len_y)
else:
start = 0
new_y[start:start + len_y] = y
y = new_y.astype(np.float32)
elif len_y > effective_length:
if not self.validation:
start = np.random.randint(len_y - effective_length)
else:
start = 0
y = y[start:start + effective_length].astype(np.float32)
else:
y = y.astype(np.float32)
if self.waveform_transforms:
y = self.waveform_transforms(y)
melspec = librosa.feature.melspectrogram(
y, sr=sr, **self.melspectrogram_parameters)
melspec = librosa.power_to_db(melspec)
if self.spectrogram_transforms:
melspec = self.spectrogram_transforms(image=melspec)["image"]
norm_melspec = normalize_melspec(melspec)
height, width = norm_melspec.shape
image = cv2.resize(
norm_melspec, (int(width * self.img_size / height), self.img_size))
height, width = image.shape
image = image.reshape(1, height, width)
image = (image / 255.0).astype(np.float32)
labels = np.zeros(len(CFG.target_columns), dtype=float)
labels[CFG.target_columns.index(ebird_code)] = 1.0
return {
"image": image,
"targets": labels
}
# =================================================
# Transforms #
# =================================================
def get_transforms(phase: str):
transforms = CFG.transforms
if transforms is None:
return None
else:
if transforms[phase] is None:
return None
trns_list = []
for trns_conf in transforms[phase]:
trns_name = trns_conf["name"]
trns_params = {} if trns_conf.get("params") is None else \
trns_conf["params"]
if globals().get(trns_name) is not None:
trns_cls = globals()[trns_name]
trns_list.append(trns_cls(**trns_params))
if len(trns_list) > 0:
return Compose(trns_list)
else:
return None
def get_waveform_transforms(config: dict, phase: str):
    # `config` is unused here: waveform transforms are read from CFG.transforms
    # inside get_transforms(), which only takes the phase.
    return get_transforms(phase)
def get_spectrogram_transforms(config: dict, phase: str):
transforms = config.get('spectrogram_transforms')
if transforms is None:
return None
else:
if transforms[phase] is None:
return None
trns_list = []
for trns_conf in transforms[phase]:
trns_name = trns_conf["name"]
trns_params = {} if trns_conf.get("params") is None else \
trns_conf["params"]
if hasattr(A, trns_name):
trns_cls = A.__getattribute__(trns_name)
trns_list.append(trns_cls(**trns_params))
else:
trns_cls = globals().get(trns_name)
if trns_cls is not None:
trns_list.append(trns_cls(**trns_params))
if len(trns_list) > 0:
return A.Compose(trns_list, p=1.0)
else:
return None
class Normalize:
def __call__(self, y: np.ndarray):
max_vol = np.abs(y).max()
        # Guard against all-zero clips, which would otherwise divide by zero.
        y_vol = y / max(max_vol, 1e-8)
return np.asfortranarray(y_vol)
class NewNormalize:
def __call__(self, y: np.ndarray):
y_mm = y - y.mean()
        return y_mm / np.abs(y_mm).max()
class Compose:
def __init__(self, transforms: list):
self.transforms = transforms
def __call__(self, y: np.ndarray):
for trns in self.transforms:
y = trns(y)
return y
class AudioTransform:
def __init__(self, always_apply=False, p=0.5):
self.always_apply = always_apply
self.p = p
def __call__(self, y: np.ndarray):
if self.always_apply:
return self.apply(y)
else:
if np.random.rand() < self.p:
return self.apply(y)
else:
return y
def apply(self, y: np.ndarray):
raise NotImplementedError
class NoiseInjection(AudioTransform):
def __init__(self, always_apply=False, p=0.5, max_noise_level=0.5, sr=32000):
super().__init__(always_apply, p)
self.noise_level = (0.0, max_noise_level)
self.sr = sr
def apply(self, y: np.ndarray, **params):
noise_level = np.random.uniform(*self.noise_level)
noise = np.random.randn(len(y))
augmented = (y + noise * noise_level).astype(y.dtype)
return augmented
class GaussianNoise(AudioTransform):
def __init__(self, always_apply=False, p=0.5, min_snr=5, max_snr=20, sr=32000):
super().__init__(always_apply, p)
self.min_snr = min_snr
self.max_snr = max_snr
self.sr = sr
def apply(self, y: np.ndarray, **params):
snr = np.random.uniform(self.min_snr, self.max_snr)
a_signal = np.sqrt(y ** 2).max()
a_noise = a_signal / (10 ** (snr / 20))
white_noise = np.random.randn(len(y))
a_white = np.sqrt(white_noise ** 2).max()
augmented = (y + white_noise * 1 / a_white * a_noise).astype(y.dtype)
return augmented
class PinkNoise(AudioTransform):
def __init__(self, always_apply=False, p=0.5, min_snr=5, max_snr=20, sr=32000):
super().__init__(always_apply, p)
self.min_snr = min_snr
self.max_snr = max_snr
self.sr = sr
def apply(self, y: np.ndarray, **params):
snr = np.random.uniform(self.min_snr, self.max_snr)
a_signal = np.sqrt(y ** 2).max()
a_noise = a_signal / (10 ** (snr / 20))
pink_noise = cn.powerlaw_psd_gaussian(1, len(y))
a_pink = np.sqrt(pink_noise ** 2).max()
augmented = (y + pink_noise * 1 / a_pink * a_noise).astype(y.dtype)
return augmented
class PitchShift(AudioTransform):
def __init__(self, always_apply=False, p=0.5, max_range=5, sr=32000):
super().__init__(always_apply, p)
self.max_range = max_range
self.sr = sr
def apply(self, y: np.ndarray, **params):
n_steps = np.random.randint(-self.max_range, self.max_range)
augmented = librosa.effects.pitch_shift(y, self.sr, n_steps)
return augmented
class TimeStretch(AudioTransform):
def __init__(self, always_apply=False, p=0.5, max_rate=1, sr=32000):
super().__init__(always_apply, p)
self.max_rate = max_rate
self.sr = sr
def apply(self, y: np.ndarray, **params):
rate = np.random.uniform(0, self.max_rate)
augmented = librosa.effects.time_stretch(y, rate)
return augmented
def _db2float(db: float, amplitude=True):
if amplitude:
return 10**(db / 20)
else:
return 10 ** (db / 10)
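# Worked example for _db2float(): in amplitude terms +20 dB is exactly a factor
# of 10 and +6 dB is roughly a factor of 2; in power terms +10 dB is a factor of 10.
#
#   >>> round(_db2float(20), 3), round(_db2float(10, amplitude=False), 3)
#   (10.0, 10.0)
#   >>> round(_db2float(6), 3)
#   1.995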
def volume_down(y: np.ndarray, db: float):
"""
Low level API for decreasing the volume
Parameters
----------
y: numpy.ndarray
stereo / monaural input audio
db: float
how much decibel to decrease
Returns
-------
applied: numpy.ndarray
audio with decreased volume
"""
applied = y * _db2float(-db)
return applied
def volume_up(y: np.ndarray, db: float):
"""
Low level API for increasing the volume
Parameters
----------
y: numpy.ndarray
stereo / monaural input audio
db: float
how much decibel to increase
Returns
-------
applied: numpy.ndarray
audio with increased volume
"""
applied = y * _db2float(db)
return applied
class RandomVolume(AudioTransform):
def __init__(self, always_apply=False, p=0.5, limit=10):
super().__init__(always_apply, p)
self.limit = limit
def apply(self, y: np.ndarray, **params):
db = np.random.uniform(-self.limit, self.limit)
if db >= 0:
return volume_up(y, db)
else:
return volume_down(y, db)
class OneOf:
def __init__(self, transforms: list):
self.transforms = transforms
def __call__(self, y: np.ndarray):
n_trns = len(self.transforms)
trns_idx = np.random.choice(n_trns)
trns = self.transforms[trns_idx]
y = trns(y)
return y
class CosineVolume(AudioTransform):
def __init__(self, always_apply=False, p=0.5, limit=10):
super().__init__(always_apply, p)
self.limit = limit
def apply(self, y: np.ndarray, **params):
db = np.random.uniform(-self.limit, self.limit)
cosine = np.cos(np.arange(len(y)) / len(y) * np.pi * 2)
dbs = _db2float(cosine * db)
return y * dbs
def drop_stripes(image: np.ndarray, dim: int, drop_width: int, stripes_num: int):
total_width = image.shape[dim]
lowest_value = image.min()
for _ in range(stripes_num):
distance = np.random.randint(low=0, high=drop_width, size=(1,))[0]
begin = np.random.randint(
low=0, high=total_width - distance, size=(1,))[0]
if dim == 0:
image[begin:begin + distance] = lowest_value
        elif dim == 1:
            image[:, begin:begin + distance] = lowest_value
        elif dim == 2:
            image[:, :, begin:begin + distance] = lowest_value
return image
class TimeFreqMasking(ImageOnlyTransform):
def __init__(self,
time_drop_width: int,
time_stripes_num: int,
freq_drop_width: int,
freq_stripes_num: int,
always_apply=False,
p=0.5):
super().__init__(always_apply, p)
self.time_drop_width = time_drop_width
self.time_stripes_num = time_stripes_num
self.freq_drop_width = freq_drop_width
self.freq_stripes_num = freq_stripes_num
def apply(self, img, **params):
img_ = img.copy()
if img.ndim == 2:
img_ = drop_stripes(
img_, dim=0, drop_width=self.freq_drop_width, stripes_num=self.freq_stripes_num)
img_ = drop_stripes(
img_, dim=1, drop_width=self.time_drop_width, stripes_num=self.time_stripes_num)
return img_
# =================================================
# Model #
# =================================================
def init_layer(layer):
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, "bias"):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.0)
def init_weights(model):
classname = model.__class__.__name__
if classname.find("Conv2d") != -1:
nn.init.xavier_uniform_(model.weight, gain=np.sqrt(2))
model.bias.data.fill_(0)
elif classname.find("BatchNorm") != -1:
model.weight.data.normal_(1.0, 0.02)
model.bias.data.fill_(0)
elif classname.find("GRU") != -1:
for weight in model.parameters():
if len(weight.size()) > 1:
                nn.init.orthogonal_(weight.data)
elif classname.find("Linear") != -1:
model.weight.data.normal_(0, 0.01)
model.bias.data.zero_()
def do_mixup(x: torch.Tensor, mixup_lambda: torch.Tensor):
"""Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
(1, 3, 5, ...).
Args:
x: (batch_size * 2, ...)
mixup_lambda: (batch_size * 2,)
Returns:
out: (batch_size, ...)
"""
out = (x[0::2].transpose(0, -1) * mixup_lambda[0::2] +
x[1::2].transpose(0, -1) * mixup_lambda[1::2]).transpose(0, -1)
return out
class Mixup(object):
def __init__(self, mixup_alpha, random_seed=1234):
"""Mixup coefficient generator.
"""
self.mixup_alpha = mixup_alpha
self.random_state = np.random.RandomState(random_seed)
def get_lambda(self, batch_size):
"""Get mixup random coefficients.
Args:
batch_size: int
Returns:
mixup_lambdas: (batch_size,)
"""
mixup_lambdas = []
for n in range(0, batch_size, 2):
lam = self.random_state.beta(
self.mixup_alpha, self.mixup_alpha, 1)[0]
mixup_lambdas.append(lam)
mixup_lambdas.append(1. - lam)
return torch.from_numpy(np.array(mixup_lambdas, dtype=np.float32))
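# Minimal sketch of how Mixup.get_lambda pairs with do_mixup() above (this file
# does not call them in its training loop, so treat the snippet as illustrative
# only): a doubled batch is mixed down to half size by blending consecutive
# (even, odd) samples.
#
#   >>> mixer = Mixup(mixup_alpha=1.0)
#   >>> x = torch.randn(8, 1, 224, 224)        # batch_size * 2 = 8
#   >>> lam = mixer.get_lambda(batch_size=8)   # shape (8,)
#   >>> do_mixup(x, lam).shape
#   torch.Size([4, 1, 224, 224])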
def interpolate(x: torch.Tensor, ratio: int):
    """Interpolate data in the time domain. This is used to compensate for the
    resolution reduction caused by downsampling in a CNN.
Args:
x: (batch_size, time_steps, classes_num)
ratio: int, ratio to interpolate
Returns:
upsampled: (batch_size, time_steps * ratio, classes_num)
"""
(batch_size, time_steps, classes_num) = x.shape
upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
return upsampled
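# Shape sketch for interpolate(): every time step is repeated `ratio` times so
# segment-level predictions can be mapped back onto frames, e.g. an input of
# shape (batch, 32, num_classes) with ratio=8 becomes (batch, 256, num_classes).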
def pad_framewise_output(framewise_output: torch.Tensor, frames_num: int):
"""Pad framewise_output to the same length as input frames. The pad value
is the same as the value of the last frame.
Args:
framewise_output: (batch_size, frames_num, classes_num)
frames_num: int, number of frames to pad
Outputs:
output: (batch_size, frames_num, classes_num)
"""
output = F.interpolate(
framewise_output.unsqueeze(1),
size=(frames_num, framewise_output.size(2)),
align_corners=True,
mode="bilinear").squeeze(1)
return output
def gem(x: torch.Tensor, p=3, eps=1e-6):
return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(1. / p)
class GeM(nn.Module):
def __init__(self, p=3, eps=1e-6):
super().__init__()
self.p = nn.Parameter(torch.ones(1) * p)
self.eps = eps
def forward(self, x):
return gem(x, p=self.p, eps=self.eps)
def __repr__(self):
return self.__class__.__name__ + f"(p={self.p.data.tolist()[0]:.4f}, eps={self.eps})"
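# GeM implements generalised-mean pooling, gem(x) = (mean(x^p))^(1/p): p = 1
# recovers average pooling and p -> infinity approaches max pooling. Here p is
# a learnable parameter initialised to 3.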
class AttBlockV2(nn.Module):
def __init__(self,
in_features: int,
out_features: int,
activation="linear"):
super().__init__()
self.activation = activation
self.att = nn.Conv1d(
in_channels=in_features,
out_channels=out_features,
kernel_size=1,
stride=1,
padding=0,
bias=True)
self.cla = nn.Conv1d(
in_channels=in_features,
out_channels=out_features,
kernel_size=1,
stride=1,
padding=0,
bias=True)
self.init_weights()
def init_weights(self):
init_layer(self.att)
init_layer(self.cla)
def forward(self, x):
# x: (n_samples, n_in, n_time)
norm_att = torch.softmax(torch.tanh(self.att(x)), dim=-1)
cla = self.nonlinear_transform(self.cla(x))
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att, cla
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
class TimmModel(nn.Module):
def __init__(self, base_model_name="tf_efficientnet_b0_ns", pooling="GeM", pretrained=True, num_classes=24, in_channels=1):
super().__init__()
self.base_model = timm.create_model(
base_model_name, pretrained=pretrained, in_chans=in_channels)
if hasattr(self.base_model, "fc"):
in_features = self.base_model.fc.in_features
self.base_model.fc = nn.Linear(in_features, num_classes)
elif hasattr(self.base_model, "classifier"):
in_features = self.base_model.classifier.in_features
self.base_model.classifier = nn.Linear(in_features, num_classes)
else:
raise NotImplementedError
if pooling == "GeM":
self.base_model.global_pool = GeM()
elif pooling == "max":
self.base_model.global_pool = SelectAdaptivePool2d(
pool_type="max", flatten=True)
self.init_layer()
    def init_layer(self):
        # The classification head was replaced with a fresh nn.Linear above; it
        # lives under .classifier or .fc depending on the timm backbone.
        if hasattr(self.base_model, "classifier"):
            init_layer(self.base_model.classifier)
        else:
            init_layer(self.base_model.fc)
def forward(self, x):
return self.base_model(x)
class TimmSED(nn.Module):
def __init__(self, base_model_name: str, pretrained=False, num_classes=24, in_channels=1):
super().__init__()
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=CFG.n_fft, hop_length=CFG.hop_length,
win_length=CFG.n_fft, window="hann", center=True, pad_mode="reflect",
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=CFG.sample_rate, n_fft=CFG.n_fft,
n_mels=CFG.n_mels, fmin=CFG.fmin, fmax=CFG.fmax, ref=1.0, amin=1e-10, top_db=None,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(CFG.n_mels)
base_model = timm.create_model(
base_model_name, pretrained=pretrained, in_chans=in_channels)
layers = list(base_model.children())[:-2]
self.encoder = nn.Sequential(*layers)
if hasattr(base_model, "fc"):
in_features = base_model.fc.in_features
else:
in_features = base_model.classifier.in_features
self.fc1 = nn.Linear(in_features, in_features, bias=True)
self.att_block = AttBlockV2(
in_features, num_classes, activation="sigmoid")
self.init_weight()
def init_weight(self):
init_layer(self.fc1)
init_bn(self.bn0)
def forward(self, input):
# (batch_size, 1, time_steps, freq_bins)
x = self.spectrogram_extractor(input)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
frames_num = x.shape[2]
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
x = x.transpose(2, 3)
# (batch_size, channels, freq, frames)
x = self.encoder(x)
# (batch_size, channels, frames)
x = torch.mean(x, dim=2)
# channel smoothing
x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = x.transpose(1, 2)
x = F.relu_(self.fc1(x))
x = x.transpose(1, 2)
x = F.dropout(x, p=0.5, training=self.training)
(clipwise_output, norm_att, segmentwise_output) = self.att_block(x)
logit = torch.sum(norm_att * self.att_block.cla(x), dim=2)
segmentwise_logit = self.att_block.cla(x).transpose(1, 2)
segmentwise_output = segmentwise_output.transpose(1, 2)
interpolate_ratio = frames_num // segmentwise_output.size(1)
# Get framewise output
framewise_output = interpolate(segmentwise_output,
interpolate_ratio)
framewise_output = pad_framewise_output(framewise_output, frames_num)
framewise_logit = interpolate(segmentwise_logit, interpolate_ratio)
framewise_logit = pad_framewise_output(framewise_logit, frames_num)
output_dict = {
"framewise_output": framewise_output,
"segmentwise_output": segmentwise_output,
"logit": logit,
"framewise_logit": framewise_logit,
"clipwise_output": clipwise_output
}
return output_dict
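# Rough I/O sketch for TimmSED (assuming the torchlibrosa front end above): the
# model consumes raw waveforms of shape (batch, samples) at CFG.sample_rate and
# returns clip-level logits plus frame-level outputs, e.g.
#
#   >>> model = TimmSED(CFG.base_model_name, pretrained=False,
#   ...                 num_classes=CFG.num_classes, in_channels=CFG.in_channels)
#   >>> out = model(torch.randn(2, CFG.sample_rate * CFG.period))
#   >>> sorted(out.keys())
#   ['clipwise_output', 'framewise_logit', 'framewise_output', 'logit', 'segmentwise_output']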
# =================================================
# Optimizer and Scheduler #
# =================================================
version_higher = (torch.__version__ >= "1.5.0")
class AdaBelief(Optimizer):
r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
        weight_decouple (boolean, optional): (default: False) If set to True,
            the optimizer uses decoupled weight decay as in AdamW
        fixed_decay (boolean, optional): (default: False) This is used when weight_decouple
            is set to True.
            When fixed_decay == True, the weight decay is performed as
            $W_{new} = W_{old} - W_{old} \times decay$.
            When fixed_decay == False, the weight decay is performed as
            $W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the
            weight decay ratio decreases with the learning rate (lr).
        rectify (boolean, optional): (default: False) If set to True, perform a rectified
            update similar to RAdam
reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients
NeurIPS 2020 Spotlight
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False, weight_decouple=False, fixed_decay=False, rectify=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdaBelief, self).__init__(params, defaults)
self.weight_decouple = weight_decouple
self.rectify = rectify
self.fixed_decay = fixed_decay
if self.weight_decouple:
print('Weight decoupling enabled in AdaBelief')
if self.fixed_decay:
print('Weight decay fixed')
if self.rectify:
print('Rectification enabled in AdaBelief')
if amsgrad:
print('AMS enabled in AdaBelief')
def __setstate__(self, state):
super(AdaBelief, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
amsgrad = group['amsgrad']
# State initialization
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(
p.data,
memory_format=torch.preserve_format) if version_higher else torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_var'] = torch.zeros_like(
p.data,
memory_format=torch.preserve_format) if version_higher else torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_var'] = torch.zeros_like(
p.data,
memory_format=torch.preserve_format) if version_higher else torch.zeros_like(p.data)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'AdaBelief does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
beta1, beta2 = group['betas']
# State initialization
if len(state) == 0:
state['rho_inf'] = 2.0 / (1.0 - beta2) - 1.0
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(
p.data,
memory_format=torch.preserve_format) if version_higher else torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_var'] = torch.zeros_like(
p.data,
memory_format=torch.preserve_format) if version_higher else torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_var'] = torch.zeros_like(
p.data,
memory_format=torch.preserve_format) if version_higher else torch.zeros_like(p.data)
# get current state variable
exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# perform weight decay, check if decoupled weight decay
if self.weight_decouple:
if not self.fixed_decay:
p.data.mul_(1.0 - group['lr'] * group['weight_decay'])
else:
p.data.mul_(1.0 - group['weight_decay'])
else:
if group['weight_decay'] != 0:
grad.add_(group['weight_decay'], p.data)
# Update first and second moment running average
exp_avg.mul_(beta1).add_(1 - beta1, grad)
grad_residual = grad - exp_avg
exp_avg_var.mul_(beta2).addcmul_(
1 - beta2, grad_residual, grad_residual)
if amsgrad:
max_exp_avg_var = state['max_exp_avg_var']
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_var, exp_avg_var,
out=max_exp_avg_var)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_var.add_(group['eps']).sqrt(
) / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_var.add_(group['eps']).sqrt(
) / math.sqrt(bias_correction2)).add_(group['eps'])
if not self.rectify:
# Default update
step_size = group['lr'] / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
else: # Rectified update
# calculate rho_t
state['rho_t'] = state['rho_inf'] - 2 * state['step'] * beta2 ** state['step'] / (
1.0 - beta2 ** state['step'])
if state['rho_t'] > 4: # perform Adam style update if variance is small
rho_inf, rho_t = state['rho_inf'], state['rho_t']
rt = (rho_t - 4.0) * (rho_t - 2.0) * rho_inf / \
(rho_inf - 4.0) / (rho_inf - 2.0) / rho_t
rt = math.sqrt(rt)
step_size = rt * group['lr'] / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
else: # perform SGD style update
p.data.add_(-group['lr'], exp_avg)
return loss
class SAM(torch.optim.Optimizer):
def __init__(self, params, base_optimizer, rho=0.05, **kwargs):
assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}"
defaults = dict(rho=rho, **kwargs)
super(SAM, self).__init__(params, defaults)
self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
self.param_groups = self.base_optimizer.param_groups
@torch.no_grad()
def first_step(self, zero_grad=False):
grad_norm = self._grad_norm()
for group in self.param_groups:
scale = group["rho"] / (grad_norm + 1e-12)
for p in group["params"]:
if p.grad is None:
continue
e_w = p.grad * scale.to(p)
p.add_(e_w) # climb to the local maximum "w + e(w)"
self.state[p]["e_w"] = e_w
if zero_grad:
self.zero_grad()
@torch.no_grad()
def second_step(self, zero_grad=False):
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
p.sub_(self.state[p]["e_w"]) # get back to "w" from "w + e(w)"
self.base_optimizer.step() # do the actual "sharpness-aware" update
if zero_grad:
self.zero_grad()
@torch.no_grad()
def step(self, closure=None):
assert closure is not None, "Sharpness Aware Minimization requires closure, but it was not provided"
# the closure should do a full forward-backward pass
closure = torch.enable_grad()(closure)
self.first_step(zero_grad=True)
closure()
self.second_step()
def _grad_norm(self):
# put everything on the same device, in case of model parallelism
shared_device = self.param_groups[0]["params"][0].device
norm = torch.norm(
torch.stack([
p.grad.norm(p=2).to(shared_device)
for group in self.param_groups for p in group["params"]
if p.grad is not None
]),
p=2
)
return norm
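# Usage sketch for SAM (this mirrors SAMRunner._handle_batch further down): the
# optimizer needs two forward/backward passes per update,
#
#   loss = criterion(model(x), y); loss.backward()
#   optimizer.first_step(zero_grad=True)    # perturb weights to w + e(w)
#   criterion(model(x), y).backward()       # gradient at the perturbed point
#   optimizer.second_step(zero_grad=True)   # restore w and apply the base update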
__OPTIMIZERS__ = {
"AdaBelief": AdaBelief,
"SAM": SAM,
}
def get_optimizer(model: nn.Module):
optimizer_name = CFG.optimizer_name
if optimizer_name == "SAM":
base_optimizer_name = CFG.base_optimizer
if __OPTIMIZERS__.get(base_optimizer_name) is not None:
base_optimizer = __OPTIMIZERS__[base_optimizer_name]
else:
base_optimizer = optim.__getattribute__(base_optimizer_name)
return SAM(model.parameters(), base_optimizer, **CFG.optimizer_params)
if __OPTIMIZERS__.get(optimizer_name) is not None:
return __OPTIMIZERS__[optimizer_name](model.parameters(),
**CFG.optimizer_params)
else:
return optim.__getattribute__(optimizer_name)(model.parameters(),
**CFG.optimizer_params)
def get_scheduler(optimizer):
scheduler_name = CFG.scheduler_name
if scheduler_name is None:
return
else:
return optim.lr_scheduler.__getattribute__(scheduler_name)(
optimizer, **CFG.scheduler_params)
# =================================================
# Criterion #
# =================================================
# https://www.kaggle.com/c/rfcx-species-audio-detection/discussion/213075
class BCEFocalLoss(nn.Module):
def __init__(self, alpha=0.25, gamma=2.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, preds, targets, mask=None):
bce_loss = nn.BCEWithLogitsLoss(reduction='none')(preds, targets)
probas = torch.sigmoid(preds)
loss = targets * self.alpha * \
(1. - probas)**self.gamma * bce_loss + \
(1. - targets) * probas**self.gamma * bce_loss
if mask is not None:
loss = loss * mask
loss = loss.mean()
return loss
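# BCEFocalLoss above is an asymmetric binary focal loss,
#   L = alpha * t * (1 - p)^gamma * BCE + (1 - t) * p^gamma * BCE,  p = sigmoid(preds),
# and the optional mask zeroes out the secondary-label classes produced by
# WaveformDataset so they neither reward nor penalise the model.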
class BCEFocal2WayLoss(nn.Module):
def __init__(self, weights=[1, 1], class_weights=None):
super().__init__()
self.focal = BCEFocalLoss()
self.weights = weights
def forward(self, input, target, mask):
input_ = input["logit"]
target = target.float()
framewise_output = input["framewise_logit"]
clipwise_output_with_max, _ = framewise_output.max(dim=1)
loss = self.focal(input_, target, mask)
aux_loss = self.focal(clipwise_output_with_max, target, mask)
return self.weights[0] * loss + self.weights[1] * aux_loss
__CRITERIONS__ = {
"BCEFocalLoss": BCEFocalLoss,
"BCEFocal2WayLoss": BCEFocal2WayLoss
}
def get_criterion():
if hasattr(nn, CFG.loss_name):
return nn.__getattribute__(CFG.loss_name)(**CFG.loss_params)
elif __CRITERIONS__.get(CFG.loss_name) is not None:
return __CRITERIONS__[CFG.loss_name](**CFG.loss_params)
else:
raise NotImplementedError
# =================================================
# Callbacks #
# =================================================
class SchedulerCallback(Callback):
def __init__(self):
super().__init__(CallbackOrder.Scheduler)
def on_loader_end(self, state: IRunner):
lr = state.scheduler.get_last_lr()
state.epoch_metrics["lr"] = lr[0]
if state.is_train_loader:
state.scheduler.step()
class SampleF1Callback(Callback):
def __init__(self,
input_key: str = "targets",
output_key: str = "logits",
prefix: str = "f1",
threshold=0.5):
super().__init__(CallbackOrder.Metric)
self.input_key = input_key
self.output_key = output_key
self.prefix = prefix
self.threshold = threshold
def on_loader_start(self, state: IRunner):
self.prediction: List[np.ndarray] = []
self.target: List[np.ndarray] = []
def on_batch_end(self, state: IRunner):
targ = state.input[self.input_key].detach().cpu().numpy()
out = state.output[self.output_key]
clipwise_output = out["clipwise_output"].detach().cpu().numpy()
self.prediction.append(clipwise_output)
self.target.append(targ)
y_pred = clipwise_output > self.threshold
score = metrics.f1_score(targ, y_pred, average="samples")
state.batch_metrics[self.prefix] = score
def on_loader_end(self, state: IRunner):
y_pred = np.concatenate(self.prediction, axis=0) > self.threshold
y_true = np.concatenate(self.target, axis=0)
score = metrics.f1_score(y_true, y_pred, average="samples")
state.loader_metrics[self.prefix] = score
if state.is_valid_loader:
state.epoch_metrics[state.valid_loader + "_epoch_" +
self.prefix] = score
else:
state.epoch_metrics["train_epoch_" + self.prefix] = score
class mAPCallback(Callback):
def __init__(self,
input_key: str = "targets",
output_key: str = "logits",
model_output_key: str = "clipwise_output",
prefix: str = "mAP"):
super().__init__(CallbackOrder.Metric)
self.input_key = input_key
self.output_key = output_key
self.model_output_key = model_output_key
self.prefix = prefix
def on_loader_start(self, state: IRunner):
self.prediction: List[np.ndarray] = []
self.target: List[np.ndarray] = []
def on_batch_end(self, state: IRunner):
targ = state.input[self.input_key].detach().cpu().numpy()
out = state.output[self.output_key]
clipwise_output = out[self.model_output_key].detach().cpu().numpy()
self.prediction.append(clipwise_output)
self.target.append(targ)
try:
score = metrics.average_precision_score(
targ, clipwise_output, average=None)
except ValueError:
            # average_precision_score can raise ValueError on degenerate
            # batches; fall back to a zero score rather than stalling the
            # run in an interactive debugger.
            score = np.zeros(targ.shape[1])
score = np.nan_to_num(score).mean()
state.batch_metrics[self.prefix] = score
def on_loader_end(self, state: IRunner):
y_pred = np.concatenate(self.prediction, axis=0)
y_true = np.concatenate(self.target, axis=0)
score = metrics.average_precision_score(y_true, y_pred, average=None)
score = np.nan_to_num(score).mean()
state.loader_metrics[self.prefix] = score
if state.is_valid_loader:
state.epoch_metrics[state.valid_loader + "_epoch_" +
self.prefix] = score
else:
state.epoch_metrics["train_epoch_" + self.prefix] = score
def get_callbacks():
return [
SchedulerCallback(),
SampleF1Callback(prefix="f1_at_05", threshold=0.5),
SampleF1Callback(prefix="f1_at_03", threshold=0.3),
SampleF1Callback(prefix="f1_at_07", threshold=0.7),
mAPCallback()
]
# =================================================
# Runner #
# =================================================
class SAMRunner(Runner):
def predict_batch(self, batch, **kwargs):
return super().predict_batch(batch, **kwargs)
def _handle_batch(self, batch):
input_, target = batch["image"], batch["targets"]
mask = batch["mask"]
input_ = input_.to(self.device)
target = target.to(self.device)
mask = mask.to(self.device)
out = self.model(input_)
loss = self.criterion(out, target, mask)
self.batch_metrics.update({
"loss": loss
})
self.input = batch
self.output = {"logits": out}
if self.is_train_loader:
loss.backward()
self.optimizer.first_step(zero_grad=True)
self.criterion(self.model(input_), target, mask).backward()
self.optimizer.second_step(zero_grad=True)
class MaskedLossRunner(Runner):
def predict_batch(self, batch, **kwargs):
return super().predict_batch(batch, **kwargs)
def _handle_batch(self, batch):
input_, target = batch["image"], batch["targets"]
mask = batch["mask"]
input_ = input_.to(self.device)
target = target.to(self.device)
mask = mask.to(self.device)
out = self.model(input_)
loss = self.criterion(out, target, mask)
self.batch_metrics.update({
"loss": loss
})
self.input = batch
self.output = {"logits": out}
if self.is_train_loader:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def get_runner(device: torch.device):
if CFG.optimizer_name == "SAM":
return SAMRunner(device=device)
else:
        return MaskedLossRunner(device=device)
if __name__ == "__main__":
warnings.filterwarnings("ignore")
# logging
filename = __file__.split("/")[-1].replace(".py", "")
logdir = Path(f"../out/{filename}")
logdir.mkdir(exist_ok=True, parents=True)
if (logdir / "train.log").exists():
os.remove(logdir / "train.log")
logger = init_logger(log_file=logdir / "train.log")
# environment
set_seed(CFG.seed)
device = get_device()
# validation
splitter = get_split()
# data
train = pd.read_csv(CFG.train_csv)
train["datadir"] = str(CFG.train_datadir)
birdcall = pd.read_csv(CFG.birdcall_csv)
birdcall["datadir"] = str(CFG.birdcall_datadir)
birdcall = birdcall.query(
"primary_label in @CFG.target_columns").reset_index(drop=True)
train["fileid"] = train["filename"].map(lambda x: x.replace(".ogg", ""))
birdcall["fileid"] = birdcall["filename"].map(
lambda x: x.replace(".wav", ""))
train_fileids = set(train["fileid"].values.tolist())
birdcall = birdcall.query(
"fileid not in @train_fileids").reset_index(drop=True)
logger.info(f"Add {len(birdcall)} files from CBR competition")
if CFG.train:
for i, (trn_idx, val_idx) in enumerate(splitter.split(train, y=train["primary_label"])):
if i not in CFG.folds:
continue
logger.info("=" * 120)
logger.info(f"Fold {i} Training")
logger.info("=" * 120)
trn_df = train.loc[trn_idx, :].reset_index(drop=True)
val_df = train.loc[val_idx, :].reset_index(drop=True)
trn_df = pd.concat([trn_df, birdcall],
axis=0).reset_index(drop=True)
loaders = {
phase: torchdata.DataLoader(
WaveformDataset(
df_,
img_size=CFG.img_size,
waveform_transforms=get_transforms(phase),
period=CFG.period,
validation=(phase == "valid")
),
**CFG.loader_params[phase]) # type: ignore
for phase, df_ in zip(["train", "valid"], [trn_df, val_df])
}
model = TimmSED(
base_model_name=CFG.base_model_name,
pretrained=CFG.pretrained,
num_classes=CFG.num_classes,
in_channels=CFG.in_channels)
criterion = get_criterion()
optimizer = get_optimizer(model)
scheduler = get_scheduler(optimizer)
callbacks = get_callbacks()
runner = get_runner(device)
runner.train(
model=model,
criterion=criterion,
loaders=loaders,
optimizer=optimizer,
scheduler=scheduler,
num_epochs=CFG.epochs,
verbose=True,
logdir=logdir / f"fold{i}",
callbacks=callbacks,
main_metric=CFG.main_metric,
minimize_metric=CFG.minimize_metric)
del model, optimizer, scheduler
gc.collect()
torch.cuda.empty_cache()
|
the-stack_0_8380 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
NodeConn: an object which manages p2p connectivity to a bitcoin node
NodeConnCB: a base class that describes the interface for receiving
callbacks with network messages from a NodeConn
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization
"""
import asyncore
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from threading import RLock, Thread
import kietcoin_scrypt
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until
BIP0031_VERSION = 60000
MY_VERSION = 80014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
logger = logging.getLogger("TestFramework.mininode")
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
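# CompactSize encoding used above (standard Bitcoin wire format):
#   n < 253         -> 1 byte
#   n <= 0xffff     -> 0xfd + uint16 (little-endian)
#   n <= 0xffffffff -> 0xfe + uint32
#   otherwise       -> 0xff + uint64
# e.g. ser_compact_size(515) == b'\xfd\x03\x02'.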
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
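# "Compact" here is the nBits target encoding: the high byte is a size and the
# low three bytes a mantissa, target = mantissa * 256^(size - 3). For example
# the original Bitcoin genesis difficulty:
#   >>> uint256_from_compact(0x1d00ffff) == 0xffff * 2**208
#   True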
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
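# FromHex / ToHex round-trip sketch (illustrative only):
#   >>> ToHex(CTxOut(nValue=COIN, scriptPubKey=b'\x51'))   # OP_TRUE script
#   '00e1f505000000000151'
#   >>> FromHex(CTxOut(), '00e1f505000000000151').nValue == COIN
#   True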
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness(object):
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness(object):
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.scrypt256 = header.scrypt256
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
self.scrypt256 = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
self.scrypt256 = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
self.scrypt256 = uint256_from_str(kietcoin_scrypt.getPoWHash(r))
def rehash(self):
self.sha256 = None
self.scrypt256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
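    # Bitcoin's merkle rule: when a level has an odd number of hashes the last
    # one is paired with itself (that is what i2 = min(i+1, len(hashes)-1) does
    # above), and each parent is the double-SHA256 of the concatenated children.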
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.scrypt256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.scrypt256 > target:
self.nNonce += 1
self.rehash()
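    # solve() grinds nNonce until the scrypt proof-of-work hash meets the
    # compact target in nBits; at the very low difficulty used in regression
    # tests this typically terminates after only a few iterations.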
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
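# --- Illustrative sketch (not part of the original framework) ---
# BIP 152 short IDs are the low 48 bits of SipHash-2-4 over a transaction
# hash, keyed by the first 16 bytes of sha256(serialized_header || nonce)
# (see HeaderAndShortIDs.get_siphash_keys below). The helpers here only
# restate the key derivation and the 6-byte truncation with struct/hashlib;
# siphash256 itself is provided elsewhere in this framework.
def _demo_shortid_keys(header_bytes, nonce):
    import hashlib
    import struct
    digest = hashlib.sha256(header_bytes + struct.pack("<Q", nonce)).digest()
    key0 = struct.unpack("<Q", digest[0:8])[0]
    key1 = struct.unpack("<Q", digest[8:16])[0]
    return key0, key1
def _demo_truncate_shortid(siphash_value):
    # Keep only the low 48 bits, exactly as calculate_shortid() does; on the
    # wire those 6 bytes are sent without the two high zero bytes.
    return siphash_value & 0x0000ffffffffffff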
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
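# --- Illustrative sketch (not part of the original framework) ---
# getblocktxn indexes are differentially encoded: each value on the wire is
# the gap (minus one) from the previous absolute index, which is what
# from_absolute()/to_absolute() implement. The unused helper below shows a
# round trip with made-up indexes.
def _demo_blocktxn_index_roundtrip():
    req = BlockTransactionsRequest(blockhash=0)
    req.from_absolute([1, 3, 4, 10])
    assert req.indexes == [1, 1, 0, 5]
    assert req.to_absolute() == [1, 3, 4, 10]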
class BlockTransactions(object):
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
    def __init__(self, tx=None):
        # Avoid a shared mutable default; each instance gets its own CTransaction.
        self.tx = CTransaction() if tx is None else tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter(object):
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
class NodeConnCB(object):
"""Callback and helper functions for P2P connection to a bitcoind node.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour.
"""
def __init__(self):
# Track whether we have a P2P connection open to the node
self.connected = False
self.connection = None
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
# Message receiving methods
def deliver(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type.
Optionally waits for deliver_sleep_time before dispatching message.
"""
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
raise
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self, conn):
self.connected = True
def on_close(self, conn):
self.connected = False
self.connection = None
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_block(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_headers(self, conn, message): pass
    def on_mempool(self, conn, message): pass
def on_pong(self, conn, message): pass
def on_reject(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
# Connection helper methods
def add_connection(self, conn):
self.connection = conn
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_message(self, message):
if self.connection:
self.connection.send_message(message)
else:
logger.error("Cannot send message. No connection to node!")
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
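# --- Illustrative sketch (not part of the original framework) ---
# As the NodeConnCB docstring says, tests subclass it and override the on_*
# callbacks they care about; deliver() dispatches each incoming message to
# 'on_' + command. A minimal, hypothetical subclass that only counts block
# messages could look like this:
class _ExampleBlockCounter(NodeConnCB):
    def __init__(self):
        super(_ExampleBlockCounter, self).__init__()
        self.blocks_seen = 0
    def on_block(self, conn, message):
        # message.block is the deserialized CBlock carried by a "block" command
        self.blocks_seen += 1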
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xfb\xc0\xb6\xdb", # mainnet
"testnet3": b"\xfc\xc1\xb7\xdc", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
logger.info('Connecting to Kietcoin Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def handle_connect(self):
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
            # asyncore does not expose the socket's connection status, only the
            # first read/write event, so we have to detect the transition to
            # "connected" manually here.
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
raise ValueError("Unknown command: '%s'" % (command))
except Exception as e:
            logger.exception('got_data: %s', repr(e))
raise
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self._log_message("receive", message)
self.cb.deliver(self, message)
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
def disconnect_node(self):
self.disconnect = True
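# --- Illustrative sketch (not part of the original framework) ---
# send_message()/got_data() above frame every payload as:
#   4-byte network magic | 12-byte NUL-padded command | 4-byte LE length |
#   4-byte checksum (first 4 bytes of double-SHA256 of the payload) | payload
# with the checksum only present for protocol versions >= 209. The unused
# helper below rebuilds such a frame for a made-up payload so the layout can
# be inspected without opening a socket.
def _demo_wire_frame(magic=b"\xfa\xbf\xb5\xda", command=b"ping", payload=b"\x00" * 8):
    import hashlib
    import struct
    frame = magic
    frame += command + b"\x00" * (12 - len(command))
    frame += struct.pack("<I", len(payload))
    frame += hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    frame += payload
    return frame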
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
            # loop to work around the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
the-stack_0_8381 | from sympy import Function, sympify, diff, Eq, S, Symbol, Derivative
from sympy.core.compatibility import (
combinations_with_replacement, iterable)
def euler_equations(L, funcs=(), vars=()):
r"""
Find the Euler-Lagrange equations [1]_ for a given Lagrangian.
Parameters
==========
L : Expr
The Lagrangian that should be a function of the functions listed
in the second argument and their derivatives.
For example, in the case of two functions `f(x,y)`, `g(x,y)` and
two independent variables `x`, `y` the Lagrangian would have the form:
.. math:: L\left(f(x,y),g(x,y),\frac{\partial f(x,y)}{\partial x},
\frac{\partial f(x,y)}{\partial y},
\frac{\partial g(x,y)}{\partial x},
\frac{\partial g(x,y)}{\partial y},x,y\right)
In many cases it is not necessary to provide anything, except the
Lagrangian, it will be autodetected (and an error raised if this
couldn't be done).
funcs : Function or an iterable of Functions
The functions that the Lagrangian depends on. The Euler equations
are differential equations for each of these functions.
vars : Symbol or an iterable of Symbols
The Symbols that are the independent variables of the functions.
Returns
=======
eqns : list of Eq
The list of differential equations, one for each function.
Examples
========
>>> from sympy import Symbol, Function
>>> from sympy.calculus.euler import euler_equations
>>> x = Function('x')
>>> t = Symbol('t')
>>> L = (x(t).diff(t))**2/2 - x(t)**2/2
>>> euler_equations(L, x(t), t)
[-x(t) - Derivative(x(t), t, t) == 0]
>>> u = Function('u')
>>> x = Symbol('x')
>>> L = (u(t, x).diff(t))**2/2 - (u(t, x).diff(x))**2/2
>>> euler_equations(L, u(t, x), [t, x])
[-Derivative(u(t, x), t, t) + Derivative(u(t, x), x, x) == 0]
References
==========
.. [1] http://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation
"""
funcs = tuple(funcs) if iterable(funcs) else (funcs,)
if not funcs:
funcs = tuple(L.atoms(Function))
else:
for f in funcs:
if not isinstance(f, Function):
raise TypeError('Function expected, got: %s' % f)
vars = tuple(vars) if iterable(vars) else (vars,)
if not vars:
vars = funcs[0].args
else:
vars = tuple(sympify(var) for var in vars)
if not all(isinstance(v, Symbol) for v in vars):
raise TypeError('Variables are not symbols, got %s' % vars)
for f in funcs:
if not vars == f.args:
raise ValueError("Variables %s don't match args: %s" % (vars, f))
order = max(len(d.variables) for d in L.atoms(Derivative)
if d.expr in funcs)
eqns = []
for f in funcs:
eq = diff(L, f)
for i in range(1, order + 1):
for p in combinations_with_replacement(vars, i):
eq = eq + S.NegativeOne**i*diff(L, diff(f, *p), *p)
eqns.append(Eq(eq))
return eqns
|
the-stack_0_8382 | from constants import LIGHT_GRAY, PURPLE, RED, SCREEN_HEIGHT, SCREEN_WIDTH
class Settings:
"""A class to store all settings for Alien Invasion."""
def __init__(self):
"""Initialize the game's static settings."""
# Screen settings
self.screen_width = SCREEN_WIDTH
self.screen_height = SCREEN_HEIGHT
self.bg_color = LIGHT_GRAY
# Ship settings
self.ship_limit = 3
# Bullet settings
self.bullet_width = 3
self.bullet_height = 15
self.ship_bullet_color = PURPLE
self.alien_bullet_color = RED
self.ship_bullets_allowed = 3
# Alien settings
self.fleet_drop_speed = 10
# Difficulty settings (initial speeds multiplied by speedup_scale ** diff)
self.easy = 1
self.normal = 2
self.hard = 3
# How quickly the game speeds up
self.speedup_scale = 1.1
# How quickly the alien point values increase
self.score_scale = 1.5
self.initialize_dynamic_settings()
def initialize_dynamic_settings(self):
"""Initialize settings that change throughout the game."""
self.ship_speed = 9.0
self.bullet_speed = 11.0
self.alien_speed = 6.0
self.alien_bullets_allowed = 1
# fleet_direction of 1 represents right; -1 represents left.
self.fleet_direction = 1
# Scoring
self.alien_points = 50
def scale_settings(self, scale_factor):
""" Scale the bullet size and rate of fleet movement down the screen based on screen size."""
self.bullet_width *= scale_factor
self.bullet_height *= scale_factor
self.fleet_drop_speed *= scale_factor
def increase_speed(self):
"""Increase speed settings and alien point values."""
self.ship_speed *= self.speedup_scale
self.bullet_speed *= self.speedup_scale
self.alien_speed *= self.speedup_scale
self.alien_points = int(self.alien_points * self.score_scale)
def set_difficulty(self, difficulty):
"""Set the games difficulty."""
self.alien_bullets_allowed *= difficulty
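# --- Illustrative note (not part of the original game code) ---
# As the comments above describe, each increase_speed() call multiplies the
# current speeds by speedup_scale, so after n waves a speed that started at s
# is s * speedup_scale ** n, and alien_points compounds the same way with
# score_scale (truncated to an int at each step). For example, with the
# values used here:
#   ship speed after 5 waves:   9.0 * 1.1 ** 5  ~= 14.5
#   alien points after 2 waves: int(int(50 * 1.5) * 1.5) == 112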
|
the-stack_0_8387 | import curses
import time
def pelotita(stdscr):
DELAY = 30000
x = 10
y = 10
stdscr.nodelay(True)
max_y, max_x = stdscr.getmaxyx()
k = 0
next_x = 0
direction_x = 1
direction_y = 1
curses.initscr()
curses.noecho()
stdscr.border()
curses.curs_set(False)
while k != ord('q'):
x += direction_x
y += direction_y
        stdscr.clear()
        stdscr.border()  # redraw the border, since clear() wipes it each frame
        time.sleep(0.1)
if y in (max_y - len("o"), 0):
direction_y = -direction_y # reverse
# left and right
if x in (max_x - len("o"), 0):
direction_x = -direction_x # reverse
stdscr.addstr(y,x,"o")
stdscr.refresh()
k = stdscr.getch()
curses.endwin()
def main():
curses.wrapper(pelotita)
if __name__ == "__main__":
main() |
the-stack_0_8392 | import argparse
from components import *
import time
import sys
def setup_args():
parser = argparse.ArgumentParser()
parser.add_argument('-l', action='store', dest='llvm_bc_out',
help='Destination directory where all the generated bitcode files should be stored.')
parser.add_argument('-a', action='store', dest='chipset_num',
help='Chipset number. Valid chipset numbers are: 1(mediatek)|2(qualcomm)|3(huawei)|4(samsung)',
type=int)
parser.add_argument('-m', action='store', dest='makeout',
help='Path to the makeout.txt file.')
parser.add_argument('-g', action='store', dest='compiler_name',
help='Name of the compiler used in the makeout.txt, '
'This is needed to filter out compilation commands. Ex: aarch64-linux-android-gcc')
parser.add_argument('-n', action='store', dest='arch_num',
help='Destination architecture, 32 bit (1) or 64 bit (2).',
type=int)
parser.add_argument('-o', action='store', dest='out', default=None,
help='Path to the out folder. This is the folder, which could be used as '
'output directory during compiling some kernels.')
parser.add_argument('-k', action='store', dest='kernel_src_dir',
help='Base directory of the kernel sources.')
parser.add_argument('-skb', action='store_true', dest='skip_llvm_build', default=False,
help='Skip LLVM Build (default: not skipped).')
parser.add_argument('-skl', action='store_true', dest='skip_dr_linker', default=False,
help='Skip Dr Linker (default: not skipped).')
parser.add_argument('-skp', action='store_true', dest='skip_parse_headers', default=False,
help='Skip Parsing Headers (default: not skipped).')
parser.add_argument('-ske', action='store_true', dest='skip_entry_identifier', default=False,
help='Skip Entry point identification (default: not skipped).')
parser.add_argument('-ski', action='store_true', dest='skip_soundy_checker', default=False,
help='Skip Soundy Analysis (default: not skipped).')
parser.add_argument('-f', action='store', dest='soundy_analysis_out',
help='Path to the output folder where the soundy analysis output should be stored.')
return parser
def get_bin_path(bin_name):
out_p = subprocess.check_output('which ' + bin_name, shell=True)
return out_p.strip()
def usage():
log_error("Invalid Usage.")
    log_error("Run: python", __file__, "--help", "to know the correct usage.")
sys.exit(-1)
def main():
arg_parser = setup_args()
parsed_args = arg_parser.parse_args()
arg_dict = dict()
utils_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
utils_dir = os.path.join(utils_dir, "../llvm_analysis")
ana_helpers = os.path.join(utils_dir, "AnalysisHelpers")
passes_dir = os.path.join(utils_dir, "MainAnalysisPasses/build_dir")
# check for correct usage.
if parsed_args.llvm_bc_out is None:
usage()
# step 1: Setup common dictionary
arg_dict['llvm_bc_out'] = parsed_args.llvm_bc_out
arg_dict['dr_link_bin'] = os.path.join(ana_helpers, "Dr_linker/dr_linker")
arg_dict['chipset_num'] = parsed_args.chipset_num
arg_dict['makeout'] = parsed_args.makeout
arg_dict['clangbin'] = get_bin_path('clang')
arg_dict['compiler_name'] = parsed_args.compiler_name
arg_dict['arch_num'] = parsed_args.arch_num
arg_dict['out'] = parsed_args.out
arg_dict['c2xml_bin'] = get_bin_path('c2xml')
arg_dict['kernel_src_dir'] = parsed_args.kernel_src_dir
arg_dict['ep_finder_bin'] = os.path.join(ana_helpers, "EntryPointIdentifier/entry_point_handler")
arg_dict['opt_bin_path'] = get_bin_path('opt')
arg_dict['soundy_analysis_so'] = os.path.join(passes_dir, "SoundyAliasAnalysis/libSoundyAliasAnalysis.so")
arg_dict['soundy_analysis_out'] = parsed_args.soundy_analysis_out
arg_dict['soundy_analysis_instr_out'] = os.path.join(parsed_args.soundy_analysis_out, "instr_warnings")
arg_dict['total_warning_stats'] = os.path.join(parsed_args.soundy_analysis_out, 'warnings_stats.csv')
__add_temp_files(arg_dict)
component_times = {}
# set up all the components that need to run
target_components = list()
if not parsed_args.skip_llvm_build:
target_components.append(LLVMBuild(arg_dict))
if not parsed_args.skip_dr_linker:
target_components.append(DriverLinker(arg_dict))
if not parsed_args.skip_parse_headers:
target_components.append(ParseHeaders(arg_dict))
if not parsed_args.skip_entry_identifier:
target_components.append(EntryPointIdentifier(arg_dict))
if not parsed_args.skip_soundy_checker:
target_components.append(SoundyAnalysisRunner(arg_dict))
target_components.append(ComputeWarningStats(arg_dict))
for curr_comp in target_components:
component_name = curr_comp.get_name()
log_info("Trying to Run Component:", component_name)
ret_val = __run_component(curr_comp, component_times)
if ret_val:
log_success("Component:", component_name, " passed successfully.")
else:
log_error("Component:", component_name, " failed. Exiting.")
log_info("Component Runtime information:")
for curr_comp in component_times:
log_info(curr_comp + ":" + str(component_times[curr_comp]) + " seconds.")
def __run_component(component_obj, component_times):
"""
Run provided component.
This function takes care of running setup, performing the component.
It takes of ignoring the error, if the component is non-critical.
:param component_obj: Component object to be run.
:param component_times: Dictionary in which each components times
should be recorded.
:return: True if component ran fine else False.
"""
setup_msg = component_obj.setup()
if setup_msg is None:
log_success("Setup for component:", component_obj.get_name(), " complete")
st_time = time.time()
ret_val = component_obj.perform()
total_time = time.time() - st_time
component_times[component_obj.get_name()] = total_time
if ret_val:
log_success("Component:", component_obj.get_name(), " ran successfully.")
return True
else:
log_warning("Component:", component_obj.get_name(), " failed.")
# Ignore if the component is not critical.
if not component_obj.is_critical():
return True
else:
log_error("Setup failed for component:", component_obj.get_name(), ", with Error:", setup_msg)
return False
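# --- Illustrative sketch (not part of this codebase) ---
# __run_component() only relies on a small surface of each component object:
# get_name(), setup() (None on success, otherwise an error message),
# perform() (True/False) and is_critical(). The real interface lives in the
# `components` package imported above; the unused stub below merely restates
# that contract for illustration.
class _ExampleComponent(object):
    def __init__(self, arg_dict):
        self.arg_dict = arg_dict
    def get_name(self):
        return "ExampleComponent"
    def setup(self):
        return None  # None signals a successful setup
    def perform(self):
        return True  # True signals the component ran successfully
    def is_critical(self):
        return False  # non-critical components may fail without aborting the run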
def __add_temp_files(target_dict):
"""
Add temp files that will be used by some components to put their output files.
:param target_dict: target dictionary to which the file paths need to be added.
:return: None
"""
target_dict['entry_point_out'] = os.path.join(target_dict['llvm_bc_out'], 'entry_point_out.txt')
target_dict['hdr_file_list'] = os.path.join(target_dict['llvm_bc_out'], 'hdr_file_config.txt')
if __name__ == "__main__":
main()
|
the-stack_0_8396 | import re
from konoha.sentence_tokenizer import SentenceTokenizer
DOCUMENT1 = """
私は猫である。にゃお。\r\n
にゃにゃ
わんわん。にゃーにゃー。
"""
DOCUMENT2 = """
私は猫である(ただしかわいいものとする。異議は認める)。にゃお。\r\n
にゃにゃ
"""
DOCUMENT3 = """
猫「にゃおにゃ。ただしかわいいものとする。異議は認める」。
にゃお。にゃにゃ
"""
DOCUMENT4 = """
わんわん。「にゃ?」(にゃー)わんわん。「わおーん。」(犬より。)
"""
DOCUMENT5 = """
わんわん。「にゃ?」(にゃー)わんわん。『わおーん。』(犬より。)
"""
DOCUMENT6 = """
わんわん。「にゃ?」(にゃー)わんわん.「わおーん。」(犬より。)
"""
def test_sentence_tokenize():
corpus = SentenceTokenizer()
expect = ["私は猫である。", "にゃお。", "にゃにゃ", "わんわん。", "にゃーにゃー。"]
result = corpus.tokenize(DOCUMENT1)
assert expect == result
def test_sentence_tokenize_with_bracket():
corpus = SentenceTokenizer()
expect = ["私は猫である(ただしかわいいものとする。異議は認める)。", "にゃお。", "にゃにゃ"]
result = corpus.tokenize(DOCUMENT2)
assert expect == result
def test_sentence_tokenize_with_quotation():
corpus = SentenceTokenizer()
expect = ["猫「にゃおにゃ。ただしかわいいものとする。異議は認める」。", "にゃお。", "にゃにゃ"]
result = corpus.tokenize(DOCUMENT3)
assert expect == result
def test_sentence_tokenize_with_combined():
corpus = SentenceTokenizer()
expect = ["わんわん。", "「にゃ?」(にゃー)わんわん。", "「わおーん。」(犬より。)"]
result = corpus.tokenize(DOCUMENT4)
assert expect == result
def test_sentence_tokenize_with_custom_patterns():
corpus = SentenceTokenizer(patterns=SentenceTokenizer.PATTERNS + [re.compile(r"『.*?』")])
expect = ["わんわん。", "「にゃ?」(にゃー)わんわん。", "『わおーん。』(犬より。)"]
result = corpus.tokenize(DOCUMENT5)
assert expect == result
def test_sentence_tokenize_with_custom_period():
corpus = SentenceTokenizer(period=".")
expect = ["わんわん。「にゃ?」(にゃー)わんわん.", "「わおーん。」(犬より。)"]
result = corpus.tokenize(DOCUMENT6)
assert expect == result
|
the-stack_0_8398 | import logging
import numpy as np
from demotivational_policy_descent.agents.agent_interface import AgentInterface
class SimpleRL(AgentInterface):
def __init__(self, env, player_id:int=1):
super().__init__(env=env, player_id=player_id)
self.reset() # Call reset here to avoid code duplication!
def reset(self):
logging.debug("Resetting parameters...")
self.observations = []
self.actions = []
self.rewards = []
logging.debug("Reset!")
def get_action(self, frame: np.array=None) -> int:
        logging.debug("Ignoring the frame and returning a uniformly random action...")
return np.random.choice([0, 1, 2])
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
dummy = SimpleRL(env=None, player_id=1)
dummy.test_attribute = 100
name = "dummy_test_model.mdl"
dummy.save_model(name)
dummy.test_attribute = 200
dummy.load_model(name)
assert dummy.test_attribute == 100
dummy.reset()
    assert dummy.test_attribute == 100  # reset() clears trajectories, not custom attributes
print("Dummy action", dummy.get_action(np.zeros(1)))
|
the-stack_0_8399 | #
# This file is automatically created by Recurly's OpenAPI generation process
# and thus any edits you make by hand will be lost. If you wish to make a
# change to this file, please create a Github issue explaining the changes you
# need and we will usher them to the appropriate places.
from .resource import Resource
import datetime
class Site(Resource):
"""
Attributes
----------
address : Address
created_at : datetime
Created at
deleted_at : datetime
Deleted at
features : :obj:`list` of :obj:`str`
A list of features enabled for the site.
id : str
Site ID
mode : str
Mode
object : str
Object type
public_api_key : str
This value is used to configure RecurlyJS to submit tokenized billing information.
settings : Settings
subdomain : str
updated_at : datetime
Updated at
"""
schema = {
"address": "Address",
"created_at": datetime,
"deleted_at": datetime,
"features": list,
"id": str,
"mode": str,
"object": str,
"public_api_key": str,
"settings": "Settings",
"subdomain": str,
"updated_at": datetime,
}
class Address(Resource):
"""
Attributes
----------
city : str
City
country : str
Country, 2-letter ISO 3166-1 alpha-2 code.
phone : str
Phone number
postal_code : str
Zip or postal code.
region : str
State or province.
street1 : str
Street 1
street2 : str
Street 2
"""
schema = {
"city": str,
"country": str,
"phone": str,
"postal_code": str,
"region": str,
"street1": str,
"street2": str,
}
class Settings(Resource):
"""
Attributes
----------
accepted_currencies : :obj:`list` of :obj:`str`
billing_address_requirement : str
- full: Full Address (Street, City, State, Postal Code and Country)
- streetzip: Street and Postal Code only
- zip: Postal Code only
- none: No Address
default_currency : str
The default 3-letter ISO 4217 currency code.
"""
schema = {
"accepted_currencies": list,
"billing_address_requirement": str,
"default_currency": str,
}
class Error(Resource):
"""
Attributes
----------
message : str
Message
params : :obj:`list` of :obj:`dict`
Parameter specific errors
type : str
Type
"""
schema = {"message": str, "params": list, "type": str}
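# --- Illustrative sketch (not part of the generated client) ---
# Every resource's `schema` dict maps attribute names to a plain Python type,
# the datetime module, a resource class name given as a string, or a list of
# the same. The unused helper below only illustrates how such a mapping could
# drive a naive type check of a decoded JSON payload; it is not how the
# Recurly client's Resource base class actually casts responses.
def _demo_check_against_schema(payload, schema):
    mismatches = {}
    for name, expected in schema.items():
        # Only check entries whose schema value is a plain type; strings name
        # nested resources and lists describe collections.
        if name not in payload or not isinstance(expected, type):
            continue
        if not isinstance(payload[name], expected):
            mismatches[name] = type(payload[name]).__name__
    return mismatches
# e.g. _demo_check_against_schema({"message": "not found", "type": 1}, Error.schema)
# reports that "type" holds an int where the schema expects str.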
class Account(Resource):
"""
Attributes
----------
address : Address
bill_to : str
An enumerable describing the billing behavior of the account, specifically whether the account is self-paying or will rely on the parent account to pay.
billing_info : BillingInfo
cc_emails : str
Additional email address that should receive account correspondence. These should be separated only by commas. These CC emails will receive all emails that the `email` field also receives.
code : str
The unique identifier of the account. This cannot be changed once the account is created.
company : str
created_at : datetime
When the account was created.
custom_fields : :obj:`list` of :obj:`CustomField`
The custom fields will only be altered when they are included in a request. Sending an empty array will not remove any existing values. To remove a field send the name with a null or empty value.
deleted_at : datetime
If present, when the account was last marked inactive.
dunning_campaign_id : str
Unique ID to identify a dunning campaign. Used to specify if a non-default dunning campaign should be assigned to this account. For sites without multiple dunning campaigns enabled, the default dunning campaign will always be used.
email : str
The email address used for communicating with this customer. The customer will also use this email address to log into your hosted account management pages. This value does not need to be unique.
exemption_certificate : str
The tax exemption certificate number for the account. If the merchant has an integration for the Vertex tax provider, this optional value will be sent in any tax calculation requests for the account.
first_name : str
has_active_subscription : bool
Indicates if the account has an active subscription.
has_canceled_subscription : bool
Indicates if the account has a canceled subscription.
has_future_subscription : bool
Indicates if the account has a future subscription.
has_live_subscription : bool
Indicates if the account has a subscription that is either active, canceled, future, or paused.
has_past_due_invoice : bool
Indicates if the account has a past due invoice.
has_paused_subscription : bool
Indicates if the account has a paused subscription.
hosted_login_token : str
The unique token for automatically logging the account in to the hosted management pages. You may automatically log the user into their hosted management pages by directing the user to: `https://{subdomain}.recurly.com/account/{hosted_login_token}`.
id : str
invoice_template_id : str
Unique ID to identify an invoice template. Available when the Invoice Customization feature is enabled. Used to specify if a non-default invoice template will be used to generate invoices for the account. For sites without multiple invoice templates enabled, the default template will always be used.
last_name : str
object : str
Object type
parent_account_id : str
The UUID of the parent account associated with this account.
preferred_locale : str
Used to determine the language and locale of emails sent on behalf of the merchant to the customer.
shipping_addresses : :obj:`list` of :obj:`ShippingAddress`
The shipping addresses on the account.
state : str
Accounts can be either active or inactive.
tax_exempt : bool
The tax status of the account. `true` exempts tax on the account, `false` applies tax on the account.
updated_at : datetime
When the account was last changed.
username : str
A secondary value for the account.
vat_number : str
The VAT number of the account (to avoid having the VAT applied). This is only used for manually collected invoices.
"""
schema = {
"address": "Address",
"bill_to": str,
"billing_info": "BillingInfo",
"cc_emails": str,
"code": str,
"company": str,
"created_at": datetime,
"custom_fields": ["CustomField"],
"deleted_at": datetime,
"dunning_campaign_id": str,
"email": str,
"exemption_certificate": str,
"first_name": str,
"has_active_subscription": bool,
"has_canceled_subscription": bool,
"has_future_subscription": bool,
"has_live_subscription": bool,
"has_past_due_invoice": bool,
"has_paused_subscription": bool,
"hosted_login_token": str,
"id": str,
"invoice_template_id": str,
"last_name": str,
"object": str,
"parent_account_id": str,
"preferred_locale": str,
"shipping_addresses": ["ShippingAddress"],
"state": str,
"tax_exempt": bool,
"updated_at": datetime,
"username": str,
"vat_number": str,
}
class ShippingAddress(Resource):
"""
Attributes
----------
account_id : str
Account ID
city : str
company : str
country : str
Country, 2-letter ISO 3166-1 alpha-2 code.
created_at : datetime
Created at
email : str
first_name : str
id : str
Shipping Address ID
last_name : str
nickname : str
object : str
Object type
phone : str
postal_code : str
Zip or postal code.
region : str
State or province.
street1 : str
street2 : str
updated_at : datetime
Updated at
vat_number : str
"""
schema = {
"account_id": str,
"city": str,
"company": str,
"country": str,
"created_at": datetime,
"email": str,
"first_name": str,
"id": str,
"last_name": str,
"nickname": str,
"object": str,
"phone": str,
"postal_code": str,
"region": str,
"street1": str,
"street2": str,
"updated_at": datetime,
"vat_number": str,
}
class BillingInfo(Resource):
"""
Attributes
----------
account_id : str
address : Address
backup_payment_method : bool
The `backup_payment_method` field is used to indicate a billing info as a backup on the account that will be tried if the initial billing info used for an invoice is declined.
company : str
created_at : datetime
When the billing information was created.
first_name : str
fraud : FraudInfo
Most recent fraud result.
id : str
last_name : str
object : str
Object type
payment_method : PaymentMethod
primary_payment_method : bool
The `primary_payment_method` field is used to indicate the primary billing info on the account. The first billing info created on an account will always become primary. This payment method will be used
updated_at : datetime
When the billing information was last changed.
updated_by : BillingInfoUpdatedBy
valid : bool
vat_number : str
Customer's VAT number (to avoid having the VAT applied). This is only used for automatically collected invoices.
"""
schema = {
"account_id": str,
"address": "Address",
"backup_payment_method": bool,
"company": str,
"created_at": datetime,
"first_name": str,
"fraud": "FraudInfo",
"id": str,
"last_name": str,
"object": str,
"payment_method": "PaymentMethod",
"primary_payment_method": bool,
"updated_at": datetime,
"updated_by": "BillingInfoUpdatedBy",
"valid": bool,
"vat_number": str,
}
class PaymentMethod(Resource):
"""
Attributes
----------
account_type : str
The bank account type. Only present for ACH payment methods.
billing_agreement_id : str
Billing Agreement identifier. Only present for Amazon or Paypal payment methods.
card_type : str
Visa, MasterCard, American Express, Discover, JCB, etc.
cc_bin_country : str
The 2-letter ISO 3166-1 alpha-2 country code associated with the credit card BIN, if known by Recurly. Available on the BillingInfo object only. Available when the BIN country lookup feature is enabled.
exp_month : int
Expiration month.
exp_year : int
Expiration year.
first_six : str
Credit card number's first six digits.
gateway_code : str
An identifier for a specific payment gateway.
gateway_token : str
A token used in place of a credit card in order to perform transactions.
last_four : str
Credit card number's last four digits. Will refer to bank account if payment method is ACH.
last_two : str
The IBAN bank account's last two digits.
name_on_account : str
The name associated with the bank account.
object : str
routing_number : str
The bank account's routing number. Only present for ACH payment methods.
routing_number_bank : str
The bank name of this routing number.
"""
schema = {
"account_type": str,
"billing_agreement_id": str,
"card_type": str,
"cc_bin_country": str,
"exp_month": int,
"exp_year": int,
"first_six": str,
"gateway_code": str,
"gateway_token": str,
"last_four": str,
"last_two": str,
"name_on_account": str,
"object": str,
"routing_number": str,
"routing_number_bank": str,
}
class FraudInfo(Resource):
"""
Attributes
----------
decision : str
Kount decision
risk_rules_triggered : dict
Kount rules
score : int
Kount score
"""
schema = {"decision": str, "risk_rules_triggered": dict, "score": int}
class BillingInfoUpdatedBy(Resource):
"""
Attributes
----------
country : str
Country, 2-letter ISO 3166-1 alpha-2 code matching the origin IP address, if known by Recurly.
ip : str
Customer's IP address when updating their billing information.
"""
schema = {"country": str, "ip": str}
class CustomField(Resource):
"""
Attributes
----------
name : str
Fields must be created in the UI before values can be assigned to them.
value : str
Any values that resemble a credit card number or security code (CVV/CVC) will be rejected.
"""
schema = {"name": str, "value": str}
class ErrorMayHaveTransaction(Resource):
"""
Attributes
----------
message : str
Message
params : :obj:`list` of :obj:`dict`
Parameter specific errors
transaction_error : TransactionError
This is only included on errors with `type=transaction`.
type : str
Type
"""
schema = {
"message": str,
"params": list,
"transaction_error": "TransactionError",
"type": str,
}
class TransactionError(Resource):
"""
Attributes
----------
category : str
Category
code : str
Code
merchant_advice : str
Merchant message
message : str
Customer message
object : str
Object type
three_d_secure_action_token_id : str
Returned when 3-D Secure authentication is required for a transaction. Pass this value to Recurly.js so it can continue the challenge flow.
transaction_id : str
Transaction ID
"""
schema = {
"category": str,
"code": str,
"merchant_advice": str,
"message": str,
"object": str,
"three_d_secure_action_token_id": str,
"transaction_id": str,
}
class AccountAcquisition(Resource):
"""
Attributes
----------
account : AccountMini
Account mini details
campaign : str
An arbitrary identifier for the marketing campaign that led to the acquisition of this account.
channel : str
The channel through which the account was acquired.
cost : AccountAcquisitionCost
Account balance
created_at : datetime
When the account acquisition data was created.
id : str
object : str
Object type
subchannel : str
An arbitrary subchannel string representing a distinction/subcategory within a broader channel.
updated_at : datetime
When the account acquisition data was last changed.
"""
schema = {
"account": "AccountMini",
"campaign": str,
"channel": str,
"cost": "AccountAcquisitionCost",
"created_at": datetime,
"id": str,
"object": str,
"subchannel": str,
"updated_at": datetime,
}
class AccountAcquisitionCost(Resource):
"""
Attributes
----------
amount : float
The amount of the corresponding currency used to acquire the account.
currency : str
3-letter ISO 4217 currency code.
"""
schema = {"amount": float, "currency": str}
class AccountMini(Resource):
"""
Attributes
----------
bill_to : str
code : str
The unique identifier of the account.
company : str
dunning_campaign_id : str
Unique ID to identify a dunning campaign. Used to specify if a non-default dunning campaign should be assigned to this account. For sites without multiple dunning campaigns enabled, the default dunning campaign will always be used.
email : str
The email address used for communicating with this customer.
first_name : str
id : str
last_name : str
object : str
Object type
parent_account_id : str
"""
schema = {
"bill_to": str,
"code": str,
"company": str,
"dunning_campaign_id": str,
"email": str,
"first_name": str,
"id": str,
"last_name": str,
"object": str,
"parent_account_id": str,
}
class AccountBalance(Resource):
"""
Attributes
----------
account : AccountMini
Account mini details
balances : :obj:`list` of :obj:`AccountBalanceAmount`
object : str
Object type
past_due : bool
"""
schema = {
"account": "AccountMini",
"balances": ["AccountBalanceAmount"],
"object": str,
"past_due": bool,
}
class AccountBalanceAmount(Resource):
"""
Attributes
----------
amount : float
Total amount the account is past due.
currency : str
3-letter ISO 4217 currency code.
"""
schema = {"amount": float, "currency": str}
class Transaction(Resource):
"""
Attributes
----------
account : AccountMini
Account mini details
amount : float
Total transaction amount sent to the payment gateway.
avs_check : str
When processed, result from checking the overall AVS on the transaction.
backup_payment_method_used : bool
Indicates if the transaction was completed using a backup payment method.
billing_address : AddressWithName
collected_at : datetime
Collected at, or if not collected yet, the time the transaction was created.
collection_method : str
The method by which the payment was collected.
created_at : datetime
Created at
currency : str
3-letter ISO 4217 currency code.
customer_message : str
For declined (`success=false`) transactions, the message displayed to the customer.
customer_message_locale : str
Language code for the message
cvv_check : str
When processed, result from checking the CVV/CVC value on the transaction.
gateway_approval_code : str
Transaction approval code from the payment gateway.
gateway_message : str
Transaction message from the payment gateway.
gateway_reference : str
Transaction reference number from the payment gateway.
gateway_response_code : str
For declined transactions (`success=false`), this field lists the gateway error code.
gateway_response_time : float
Time, in seconds, for gateway to process the transaction.
gateway_response_values : dict
The values in this field will vary from gateway to gateway.
id : str
Transaction ID
invoice : InvoiceMini
Invoice mini details
ip_address_country : str
Origin IP address country, 2-letter ISO 3166-1 alpha-2 code, if known by Recurly.
ip_address_v4 : str
IP address provided when the billing information was collected:
- When the customer enters billing information into the Recurly.js or Hosted Payment Pages, Recurly records the IP address.
- When the merchant enters billing information using the API, the merchant may provide an IP address.
- When the merchant enters billing information using the UI, no IP address is recorded.
object : str
Object type
origin : str
Describes how the transaction was triggered.
original_transaction_id : str
If this transaction is a refund (`type=refund`), this will be the ID of the original transaction on the invoice being refunded.
payment_gateway : TransactionPaymentGateway
payment_method : PaymentMethod
refunded : bool
Indicates if part or all of this transaction was refunded.
status : str
The current transaction status. Note that the status may change, e.g. a `pending` transaction may become `declined` or `success` may later become `void`.
status_code : str
Status code
status_message : str
For declined (`success=false`) transactions, the message displayed to the merchant.
subscription_ids : :obj:`list` of :obj:`str`
If the transaction is charging or refunding for one or more subscriptions, these are their IDs.
success : bool
Did this transaction complete successfully?
type : str
- `authorization` – verifies billing information and places a hold on money in the customer's account.
- `capture` – captures funds held by an authorization and completes a purchase.
- `purchase` – combines the authorization and capture in one transaction.
- `refund` – returns all or a portion of the money collected in a previous transaction to the customer.
- `verify` – a $0 or $1 transaction used to verify billing information which is immediately voided.
updated_at : datetime
Updated at
uuid : str
The UUID is useful for matching data with the CSV exports and building URLs into Recurly's UI.
voided_at : datetime
Voided at
voided_by_invoice : InvoiceMini
Invoice mini details
"""
schema = {
"account": "AccountMini",
"amount": float,
"avs_check": str,
"backup_payment_method_used": bool,
"billing_address": "AddressWithName",
"collected_at": datetime,
"collection_method": str,
"created_at": datetime,
"currency": str,
"customer_message": str,
"customer_message_locale": str,
"cvv_check": str,
"gateway_approval_code": str,
"gateway_message": str,
"gateway_reference": str,
"gateway_response_code": str,
"gateway_response_time": float,
"gateway_response_values": dict,
"id": str,
"invoice": "InvoiceMini",
"ip_address_country": str,
"ip_address_v4": str,
"object": str,
"origin": str,
"original_transaction_id": str,
"payment_gateway": "TransactionPaymentGateway",
"payment_method": "PaymentMethod",
"refunded": bool,
"status": str,
"status_code": str,
"status_message": str,
"subscription_ids": list,
"success": bool,
"type": str,
"updated_at": datetime,
"uuid": str,
"voided_at": datetime,
"voided_by_invoice": "InvoiceMini",
}
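# Illustrative sketch (an assumption about how the `schema` dicts can be read,
# not the client's real deserialization): each schema maps an attribute name to
# a plain Python type, to another resource's class name as a string, or to a
# one-element list meaning "list of that resource". The helpers below do a
# shallow check of a raw dict against a schema, e.g.
# `shallow_type_mismatches(payload, Transaction.schema)`. Datetime fields are
# assumed to already be parsed into `datetime` objects.
def unexpected_keys(payload, schema):
    """Return keys in `payload` that the given schema does not declare."""
    return sorted(key for key in payload if key not in schema)


def shallow_type_mismatches(payload, schema):
    """Return attribute names whose values do not match a plain type in the schema.

    Nested resources (schema values given as strings) and lists of resources
    (one-element list values) are skipped; only simple types are checked.
    """
    mismatches = []
    for name, expected in schema.items():
        if name not in payload or payload[name] is None:
            continue
        if isinstance(expected, type) and not isinstance(payload[name], expected):
            mismatches.append(name)
    return mismatches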
class InvoiceMini(Resource):
"""
Attributes
----------
id : str
Invoice ID
number : str
Invoice number
object : str
Object type
state : str
Invoice state
type : str
Invoice type
"""
schema = {"id": str, "number": str, "object": str, "state": str, "type": str}
class AddressWithName(Resource):
"""
Attributes
----------
city : str
City
country : str
Country, 2-letter ISO 3166-1 alpha-2 code.
first_name : str
First name
last_name : str
Last name
phone : str
Phone number
postal_code : str
Zip or postal code.
region : str
State or province.
street1 : str
Street 1
street2 : str
Street 2
"""
schema = {
"city": str,
"country": str,
"first_name": str,
"last_name": str,
"phone": str,
"postal_code": str,
"region": str,
"street1": str,
"street2": str,
}
class TransactionPaymentGateway(Resource):
"""
Attributes
----------
id : str
name : str
object : str
Object type
type : str
"""
schema = {"id": str, "name": str, "object": str, "type": str}
class CouponRedemption(Resource):
"""
Attributes
----------
account : AccountMini
The Account on which the coupon was applied.
coupon : Coupon
created_at : datetime
Created at
currency : str
3-letter ISO 4217 currency code.
discounted : float
The amount that was discounted upon the application of the coupon, formatted with the currency.
id : str
Coupon Redemption ID
object : str
Will always be `coupon`.
removed_at : datetime
The date and time the redemption was removed from the account (un-redeemed).
state : str
Coupon Redemption state
subscription_id : str
Subscription ID
updated_at : datetime
Last updated at
"""
schema = {
"account": "AccountMini",
"coupon": "Coupon",
"created_at": datetime,
"currency": str,
"discounted": float,
"id": str,
"object": str,
"removed_at": datetime,
"state": str,
"subscription_id": str,
"updated_at": datetime,
}
class Coupon(Resource):
"""
Attributes
----------
applies_to_all_items : bool
The coupon is valid for all items if true. If false then `items`
will list the applicable items.
applies_to_all_plans : bool
The coupon is valid for all plans if true. If false then `plans` will list the applicable plans.
applies_to_non_plan_charges : bool
The coupon is valid for one-time, non-plan charges if true.
code : str
The code the customer enters to redeem the coupon.
coupon_type : str
Whether the coupon is "single_code" or "bulk". Bulk coupons will require a `unique_code_template` and will generate unique codes through the `/generate` endpoint.
created_at : datetime
Created at
discount : CouponDiscount
Details of the discount a coupon applies. Will contain a `type`
property and one of the following properties: `percent`, `fixed`, `trial`.
duration : str
- "single_use" coupons applies to the first invoice only.
- "temporal" coupons will apply to invoices for the duration determined by the `temporal_unit` and `temporal_amount` attributes.
expired_at : datetime
The date and time the coupon was expired early or reached its `max_redemptions`.
free_trial_amount : int
Sets the duration of time the `free_trial_unit` is for.
free_trial_unit : str
Description of the unit of time the coupon is for. Used with `free_trial_amount` to determine the duration of time the coupon is for.
hosted_page_description : str
This description will show up when a customer redeems a coupon on your Hosted Payment Pages, or if you choose to show the description on your own checkout page.
id : str
Coupon ID
invoice_description : str
Description of the coupon on the invoice.
items : :obj:`list` of :obj:`ItemMini`
A list of items for which this coupon applies. This will be
`null` if `applies_to_all_items=true`.
max_redemptions : int
A maximum number of redemptions for the coupon. The coupon will expire when it hits its maximum redemptions.
max_redemptions_per_account : int
Redemptions per account is the number of times a specific account can redeem the coupon. Set redemptions per account to `1` if you want to keep customers from gaming the system and getting more than one discount from the coupon campaign.
name : str
The internal name for the coupon.
object : str
Object type
plans : :obj:`list` of :obj:`PlanMini`
A list of plans for which this coupon applies. This will be `null` if `applies_to_all_plans=true`.
redeem_by : datetime
The date and time the coupon will expire and can no longer be redeemed. Time is always 11:59:59, the end-of-day Pacific time.
redemption_resource : str
Whether the discount is for all eligible charges on the account, or only a specific subscription.
state : str
Indicates if the coupon is redeemable, and if it is not, why.
temporal_amount : int
If `duration` is "temporal" then `temporal_amount` is an integer which is multiplied by `temporal_unit` to define the duration that the coupon will be applied to invoices for.
temporal_unit : str
If `duration` is "temporal" then `temporal_unit` is multiplied by `temporal_amount` to define the duration that the coupon will be applied to invoices for.
unique_code_template : str
On a bulk coupon, the template from which unique coupon codes are generated.
unique_coupon_code : dict
Will be populated when the Coupon being returned is a `UniqueCouponCode`.
unique_coupon_codes_count : int
When this number reaches `max_redemptions` the coupon will no longer be redeemable.
updated_at : datetime
Last updated at
"""
schema = {
"applies_to_all_items": bool,
"applies_to_all_plans": bool,
"applies_to_non_plan_charges": bool,
"code": str,
"coupon_type": str,
"created_at": datetime,
"discount": "CouponDiscount",
"duration": str,
"expired_at": datetime,
"free_trial_amount": int,
"free_trial_unit": str,
"hosted_page_description": str,
"id": str,
"invoice_description": str,
"items": ["ItemMini"],
"max_redemptions": int,
"max_redemptions_per_account": int,
"name": str,
"object": str,
"plans": ["PlanMini"],
"redeem_by": datetime,
"redemption_resource": str,
"state": str,
"temporal_amount": int,
"temporal_unit": str,
"unique_code_template": str,
"unique_coupon_code": dict,
"unique_coupon_codes_count": int,
"updated_at": datetime,
}
class PlanMini(Resource):
"""
Attributes
----------
code : str
Unique code to identify the plan. This is used in Hosted Payment Page URLs and in the invoice exports.
id : str
Plan ID
name : str
This name describes your plan and will appear on the Hosted Payment Page and the subscriber's invoice.
object : str
Object type
"""
schema = {"code": str, "id": str, "name": str, "object": str}
class ItemMini(Resource):
"""
Attributes
----------
code : str
Unique code to identify the item.
description : str
Optional, description.
id : str
Item ID
name : str
This name describes your item and will appear on the invoice when it's purchased on a one time basis.
object : str
Object type
state : str
The current state of the item.
"""
schema = {
"code": str,
"description": str,
"id": str,
"name": str,
"object": str,
"state": str,
}
class CouponDiscount(Resource):
"""
Attributes
----------
currencies : :obj:`list` of :obj:`CouponDiscountPricing`
This is only present when `type=fixed`.
percent : int
This is only present when `type=percent`.
trial : CouponDiscountTrial
This is only present when `type=free_trial`.
type : str
"""
schema = {
"currencies": ["CouponDiscountPricing"],
"percent": int,
"trial": "CouponDiscountTrial",
"type": str,
}
class CouponDiscountPricing(Resource):
"""
Attributes
----------
amount : float
Value of the fixed discount that this coupon applies.
currency : str
3-letter ISO 4217 currency code.
"""
schema = {"amount": float, "currency": str}
class CouponDiscountTrial(Resource):
"""
Attributes
----------
length : int
Trial length measured in the units specified by the sibling `unit` property
unit : str
Temporal unit of the free trial
"""
schema = {"length": int, "unit": str}
class CreditPayment(Resource):
"""
Attributes
----------
account : AccountMini
Account mini details
action : str
The action for which the credit was created.
amount : float
Total credit payment amount applied to the charge invoice.
applied_to_invoice : InvoiceMini
Invoice mini details
created_at : datetime
Created at
currency : str
3-letter ISO 4217 currency code.
id : str
Credit Payment ID
object : str
Object type
original_credit_payment_id : str
For credit payments with action `refund`, this is the credit payment that was refunded.
original_invoice : InvoiceMini
Invoice mini details
refund_transaction : Transaction
updated_at : datetime
Last updated at
uuid : str
The UUID is useful for matching data with the CSV exports and building URLs into Recurly's UI.
voided_at : datetime
Voided at
"""
schema = {
"account": "AccountMini",
"action": str,
"amount": float,
"applied_to_invoice": "InvoiceMini",
"created_at": datetime,
"currency": str,
"id": str,
"object": str,
"original_credit_payment_id": str,
"original_invoice": "InvoiceMini",
"refund_transaction": "Transaction",
"updated_at": datetime,
"uuid": str,
"voided_at": datetime,
}
class Invoice(Resource):
"""
Attributes
----------
account : AccountMini
Account mini details
address : InvoiceAddress
balance : float
The outstanding balance remaining on this invoice.
billing_info_id : str
The `billing_info_id` is the value that represents a specific billing info for an end customer. When `billing_info_id` is used to assign billing info to the subscription, all future billing events for the subscription will bill to the specified billing info. `billing_info_id` can ONLY be used for sites utilizing the Wallet feature.
closed_at : datetime
Date invoice was marked paid or failed.
collection_method : str
An automatic invoice means a corresponding transaction is run using the account's billing information at the same time the invoice is created. Manual invoices are created without a corresponding transaction. The merchant must enter a manual payment transaction or have the customer pay the invoice with an automatic method, like credit card, PayPal, Amazon, or ACH bank payment.
created_at : datetime
Created at
credit_payments : :obj:`list` of :obj:`CreditPayment`
Credit payments
currency : str
3-letter ISO 4217 currency code.
customer_notes : str
This will default to the Customer Notes text specified on the Invoice Settings. Specify custom notes to add or override Customer Notes.
discount : float
Total discounts applied to this invoice.
due_at : datetime
Date invoice is due. This is the date the net terms are reached.
dunning_campaign_id : str
Unique ID to identify the dunning campaign used when dunning the invoice. For sites without multiple dunning campaigns enabled, this will always be the default dunning campaign.
has_more_line_items : bool
Identifies if the invoice has more line items than are returned in `line_items`. If `has_more_line_items` is `true`, then a request needs to be made to the `list_invoice_line_items` endpoint.
id : str
Invoice ID
line_items : :obj:`list` of :obj:`LineItem`
Line Items
net_terms : int
Integer representing the number of days after an invoice's creation that the invoice will become past due. If an invoice's net terms are set to '0', it is due 'On Receipt' and will become past due 24 hours after it’s created. If an invoice is due net 30, it will become past due at 31 days exactly.
number : str
If VAT taxation and the Country Invoice Sequencing feature are enabled, invoices will have country-specific invoice numbers for invoices billed to EU countries (ex: FR1001). Non-EU invoices will continue to use the site-level invoice number sequence.
object : str
Object type
origin : str
The event that created the invoice.
paid : float
The total amount of successful payment transactions on this invoice.
po_number : str
For manual invoicing, this identifies the PO number associated with the subscription.
previous_invoice_id : str
On refund invoices, this value will exist and show the invoice ID of the purchase invoice the refund was created from.
refundable_amount : float
The refundable amount on a charge invoice. It will be null for all other invoices.
shipping_address : ShippingAddress
state : str
Invoice state
subscription_ids : :obj:`list` of :obj:`str`
If the invoice is charging or refunding for one or more subscriptions, these are their IDs.
subtotal : float
The summation of charges and credits, before discounts and taxes.
tax : float
The total tax on this invoice.
tax_info : TaxInfo
Tax info
terms_and_conditions : str
This will default to the Terms and Conditions text specified on the Invoice Settings page in your Recurly admin. Specify custom notes to add or override Terms and Conditions.
total : float
The final total on this invoice. The summation of invoice charges, discounts, credits, and tax.
transactions : :obj:`list` of :obj:`Transaction`
Transactions
type : str
Invoices are either charge, credit, or legacy invoices.
updated_at : datetime
Last updated at
uuid : str
Invoice UUID
vat_number : str
VAT registration number for the customer on this invoice. This will come from the VAT Number field in the Billing Info or the Account Info depending on your tax settings and the invoice collection method.
vat_reverse_charge_notes : str
VAT Reverse Charge Notes only appear if you have EU VAT enabled or are using your own Avalara AvaTax account and the customer is in the EU, has a VAT number, and is in a different country than your own. This will default to the VAT Reverse Charge Notes text specified on the Tax Settings page in your Recurly admin, unless custom notes were created with the original subscription.
"""
schema = {
"account": "AccountMini",
"address": "InvoiceAddress",
"balance": float,
"billing_info_id": str,
"closed_at": datetime,
"collection_method": str,
"created_at": datetime,
"credit_payments": ["CreditPayment"],
"currency": str,
"customer_notes": str,
"discount": float,
"due_at": datetime,
"dunning_campaign_id": str,
"has_more_line_items": bool,
"id": str,
"line_items": ["LineItem"],
"net_terms": int,
"number": str,
"object": str,
"origin": str,
"paid": float,
"po_number": str,
"previous_invoice_id": str,
"refundable_amount": float,
"shipping_address": "ShippingAddress",
"state": str,
"subscription_ids": list,
"subtotal": float,
"tax": float,
"tax_info": "TaxInfo",
"terms_and_conditions": str,
"total": float,
"transactions": ["Transaction"],
"type": str,
"updated_at": datetime,
"uuid": str,
"vat_number": str,
"vat_reverse_charge_notes": str,
}
class InvoiceAddress(Resource):
"""
Attributes
----------
city : str
City
company : str
Company
country : str
Country, 2-letter ISO 3166-1 alpha-2 code.
first_name : str
First name
last_name : str
Last name
name_on_account : str
Name on account
phone : str
Phone number
postal_code : str
Zip or postal code.
region : str
State or province.
street1 : str
Street 1
street2 : str
Street 2
"""
schema = {
"city": str,
"company": str,
"country": str,
"first_name": str,
"last_name": str,
"name_on_account": str,
"phone": str,
"postal_code": str,
"region": str,
"street1": str,
"street2": str,
}
class TaxInfo(Resource):
"""
Attributes
----------
rate : float
Rate
region : str
Provides the tax region applied on an invoice. For U.S. Sales Tax, this will be the 2 letter state code. For EU VAT this will be the 2 letter country code. For all country level tax types, this will display the regional tax, like VAT, GST, or PST.
tax_details : :obj:`list` of :obj:`TaxDetail`
Provides additional tax details for Canadian Sales Tax when there is tax applied at both the country and province levels. This will only be populated for the Invoice response when fetching a single invoice and not for the InvoiceList or LineItem.
type : str
Provides the tax type as "vat" for EU VAT, "usst" for U.S. Sales Tax, or the 2 letter country code for country level tax types like Canada, Australia, New Zealand, Israel, and all non-EU European countries.
"""
schema = {"rate": float, "region": str, "tax_details": ["TaxDetail"], "type": str}
class TaxDetail(Resource):
"""
Attributes
----------
rate : float
Provides the tax rate for the region.
region : str
Provides the tax region applied on an invoice. For Canadian Sales Tax, this will be either the 2 letter province code or country code.
tax : float
The total tax applied for this tax type.
type : str
Provides the tax type for the region. For Canadian Sales Tax, this will be GST, HST, QST or PST.
"""
schema = {"rate": float, "region": str, "tax": float, "type": str}
class LineItem(Resource):
"""
Attributes
----------
account : AccountMini
Account mini details
accounting_code : str
Internal accounting code to help you reconcile your revenue to the correct ledger. Line items created as part of a subscription invoice will use the plan or add-on's accounting code, otherwise the value will only be present if you define an accounting code when creating the line item.
add_on_code : str
If the line item is a charge or credit for an add-on, this is its code.
add_on_id : str
If the line item is a charge or credit for an add-on this is its ID.
amount : float
`(quantity * unit_amount) - (discount + tax)`
avalara_service_type : int
Used by Avalara for Communications taxes. The transaction type in combination with the service type describe how the line item is taxed. Refer to [the documentation](https://help.avalara.com/AvaTax_for_Communications/Tax_Calculation/AvaTax_for_Communications_Tax_Engine/Mapping_Resources/TM_00115_AFC_Modules_Corresponding_Transaction_Types) for more available t/s types.
avalara_transaction_type : int
Used by Avalara for Communications taxes. The transaction type in combination with the service type describe how the line item is taxed. Refer to [the documentation](https://help.avalara.com/AvaTax_for_Communications/Tax_Calculation/AvaTax_for_Communications_Tax_Engine/Mapping_Resources/TM_00115_AFC_Modules_Corresponding_Transaction_Types) for more available t/s types.
bill_for_account_id : str
The UUID of the account responsible for originating the line item.
created_at : datetime
When the line item was created.
credit_applied : float
The amount of credit from this line item that was applied to the invoice.
credit_reason_code : str
The reason the credit was given when line item is `type=credit`.
currency : str
3-letter ISO 4217 currency code.
description : str
Description that appears on the invoice. For subscription related items this will be filled in automatically.
discount : float
The discount applied to the line item.
end_date : datetime
If this date is provided, it indicates the end of a time range.
external_sku : str
Optional Stock Keeping Unit assigned to an item. Available when the Credit Invoices feature is enabled.
id : str
Line item ID
invoice_id : str
Once the line item has been invoiced this will be the invoice's ID.
invoice_number : str
Once the line item has been invoiced this will be the invoice's number. If VAT taxation and the Country Invoice Sequencing feature are enabled, invoices will have country-specific invoice numbers for invoices billed to EU countries (ex: FR1001). Non-EU invoices will continue to use the site-level invoice number sequence.
item_code : str
Unique code to identify an item. Available when the Credit Invoices feature is enabled.
item_id : str
System-generated unique identifier for an item. Available when the Credit Invoices feature is enabled.
legacy_category : str
Category to describe the role of a line item on a legacy invoice:
- "charges" refers to charges being billed for on this invoice.
- "credits" refers to refund or proration credits. This portion of the invoice can be considered a credit memo.
- "applied_credits" refers to previous credits applied to this invoice. See their original_line_item_id to determine where the credit first originated.
- "carryforwards" can be ignored. They exist to consume any remaining credit balance. A new credit with the same amount will be created and placed back on the account.
object : str
Object type
origin : str
A credit created from an original charge will have the value of the charge's origin.
original_line_item_invoice_id : str
The invoice where the credit originated. Will only have a value if the line item is a credit created from a previous credit, or if the credit was created from a charge refund.
plan_code : str
If the line item is a charge or credit for a plan or add-on, this is the plan's code.
plan_id : str
If the line item is a charge or credit for a plan or add-on, this is the plan's ID.
previous_line_item_id : str
Will only have a value if the line item is a credit created from a previous credit, or if the credit was created from a charge refund.
product_code : str
For plan-related line items this will be the plan's code, for add-on related line items it will be the add-on's code. For item-related line items it will be the item's `external_sku`.
proration_rate : float
When a line item has been prorated, this is the rate of the proration. Proration rates were made available for line items created after March 30, 2017. For line items created prior to that date, the proration rate will be `null`, even if the line item was prorated.
quantity : int
This number will be multiplied by the unit amount to compute the subtotal before any discounts or taxes.
refund : bool
Refund?
refunded_quantity : int
For refund charges, the quantity being refunded. For non-refund charges, the total quantity refunded (possibly over multiple refunds).
revenue_schedule_type : str
Revenue schedule type
shipping_address : ShippingAddress
start_date : datetime
If an end date is present, this value indicates the beginning of a billing time range. If no end date is present it indicates billing for a specific date.
state : str
Pending line items are charges or credits on an account that have not been applied to an invoice yet. Invoiced line items will always have an `invoice_id` value.
subscription_id : str
If the line item is a charge or credit for a subscription, this is its ID.
subtotal : float
`quantity * unit_amount`
tax : float
The tax amount for the line item.
tax_code : str
Used by Avalara, Vertex, and Recurly’s EU VAT tax feature. The tax code values are specific to each tax system. If you are using Recurly’s EU VAT feature you can use `unknown`, `physical`, or `digital`.
tax_exempt : bool
`true` exempts tax on charges, `false` applies tax on charges. If not defined, then defaults to the Plan and Site settings. This attribute does not work for credits (negative line items). Credits are always applied post-tax. Pre-tax discounts should use the Coupons feature.
tax_info : TaxInfo
Tax info
taxable : bool
`true` if the line item is taxable, `false` if it is not.
type : str
Charges are positive line items that debit the account. Credits are negative line items that credit the account.
unit_amount : float
Positive amount for a charge, negative amount for a credit.
unit_amount_decimal : str
Positive amount for a charge, negative amount for a credit.
updated_at : datetime
When the line item was last changed.
uuid : str
The UUID is useful for matching data with the CSV exports and building URLs into Recurly's UI.
"""
schema = {
"account": "AccountMini",
"accounting_code": str,
"add_on_code": str,
"add_on_id": str,
"amount": float,
"avalara_service_type": int,
"avalara_transaction_type": int,
"bill_for_account_id": str,
"created_at": datetime,
"credit_applied": float,
"credit_reason_code": str,
"currency": str,
"description": str,
"discount": float,
"end_date": datetime,
"external_sku": str,
"id": str,
"invoice_id": str,
"invoice_number": str,
"item_code": str,
"item_id": str,
"legacy_category": str,
"object": str,
"origin": str,
"original_line_item_invoice_id": str,
"plan_code": str,
"plan_id": str,
"previous_line_item_id": str,
"product_code": str,
"proration_rate": float,
"quantity": int,
"refund": bool,
"refunded_quantity": int,
"revenue_schedule_type": str,
"shipping_address": "ShippingAddress",
"start_date": datetime,
"state": str,
"subscription_id": str,
"subtotal": float,
"tax": float,
"tax_code": str,
"tax_exempt": bool,
"tax_info": "TaxInfo",
"taxable": bool,
"type": str,
"unit_amount": float,
"unit_amount_decimal": str,
"updated_at": datetime,
"uuid": str,
}
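# Illustrative sketch following the formulas quoted in the LineItem docstring
# for `subtotal` and `amount`; a convenience for sanity-checking a raw
# line-item dict, not part of the generated client.
def expected_line_item_amounts(line_item):
    """Return (subtotal, amount) computed from the docstring formulas.

    subtotal = quantity * unit_amount
    amount   = (quantity * unit_amount) - (discount + tax)
    """
    quantity = line_item.get("quantity") or 0
    unit_amount = line_item.get("unit_amount") or 0.0
    discount = line_item.get("discount") or 0.0
    tax = line_item.get("tax") or 0.0
    subtotal = quantity * unit_amount
    return subtotal, subtotal - (discount + tax)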
class InvoiceCollection(Resource):
"""
Attributes
----------
charge_invoice : Invoice
credit_invoices : :obj:`list` of :obj:`Invoice`
Credit invoices
object : str
Object type
"""
schema = {
"charge_invoice": "Invoice",
"credit_invoices": ["Invoice"],
"object": str,
}
class AccountNote(Resource):
"""
Attributes
----------
account_id : str
created_at : datetime
id : str
message : str
object : str
Object type
user : User
"""
schema = {
"account_id": str,
"created_at": datetime,
"id": str,
"message": str,
"object": str,
"user": "User",
}
class User(Resource):
"""
Attributes
----------
created_at : datetime
deleted_at : datetime
email : str
first_name : str
id : str
last_name : str
object : str
Object type
time_zone : str
"""
schema = {
"created_at": datetime,
"deleted_at": datetime,
"email": str,
"first_name": str,
"id": str,
"last_name": str,
"object": str,
"time_zone": str,
}
class Subscription(Resource):
"""
Attributes
----------
account : AccountMini
Account mini details
activated_at : datetime
Activated at
add_ons : :obj:`list` of :obj:`SubscriptionAddOn`
Add-ons
add_ons_total : float
Total price of add-ons
auto_renew : bool
Whether the subscription renews at the end of its term.
bank_account_authorized_at : datetime
Recurring subscriptions paid with ACH will have this attribute set. This timestamp is used for alerting customers to reauthorize in 3 years in accordance with NACHA rules. If a subscription becomes inactive or the billing info is no longer a bank account, this timestamp is cleared.
billing_info_id : str
Billing Info ID.
canceled_at : datetime
Canceled at
collection_method : str
Collection method
coupon_redemptions : :obj:`list` of :obj:`CouponRedemptionMini`
Returns subscription level coupon redemptions that are tied to this subscription.
created_at : datetime
Created at
currency : str
3-letter ISO 4217 currency code.
current_period_ends_at : datetime
Current billing period ends at
current_period_started_at : datetime
Current billing period started at
current_term_ends_at : datetime
When the term ends. This is calculated by a plan's interval and `total_billing_cycles` in a term. Subscription changes with a `timeframe=renewal` will be applied on this date.
current_term_started_at : datetime
The start date of the term when the first billing period starts. The subscription term is the length of time that a customer will be committed to a subscription. A term can span multiple billing periods.
custom_fields : :obj:`list` of :obj:`CustomField`
The custom fields will only be altered when they are included in a request. Sending an empty array will not remove any existing values. To remove a field send the name with a null or empty value.
customer_notes : str
Customer notes
expiration_reason : str
Expiration reason
expires_at : datetime
Expires at
gateway_code : str
If present, this subscription's transactions will use the payment gateway with this code.
id : str
Subscription ID
net_terms : int
Integer representing the number of days after an invoice's creation that the invoice will become past due. If an invoice's net terms are set to '0', it is due 'On Receipt' and will become past due 24 hours after it’s created. If an invoice is due net 30, it will become past due at 31 days exactly.
object : str
Object type
paused_at : datetime
Null unless subscription is paused or will pause at the end of the current billing period.
pending_change : SubscriptionChange
Subscription Change
plan : PlanMini
Just the important parts.
po_number : str
For manual invoicing, this identifies the PO number associated with the subscription.
quantity : int
Subscription quantity
remaining_billing_cycles : int
The remaining billing cycles in the current term.
remaining_pause_cycles : int
Null unless subscription is paused or will pause at the end of the current billing period.
renewal_billing_cycles : int
If `auto_renew=true`, when a term completes, `total_billing_cycles` takes this value as the length of subsequent terms. Defaults to the plan's `total_billing_cycles`.
revenue_schedule_type : str
Revenue schedule type
shipping : SubscriptionShipping
Subscription shipping details
state : str
State
subtotal : float
Estimated total, before tax.
tax : float
Estimated tax
tax_info : TaxInfo
Tax info
terms_and_conditions : str
Terms and conditions
total : float
Estimated total
total_billing_cycles : int
The number of cycles/billing periods in a term. When `remaining_billing_cycles=0`, if `auto_renew=true` the subscription will renew and a new term will begin, otherwise the subscription will expire.
trial_ends_at : datetime
Trial period ends at
trial_started_at : datetime
Trial period started at
unit_amount : float
Subscription unit price
updated_at : datetime
Last updated at
uuid : str
The UUID is useful for matching data with the CSV exports and building URLs into Recurly's UI.
"""
schema = {
"account": "AccountMini",
"activated_at": datetime,
"add_ons": ["SubscriptionAddOn"],
"add_ons_total": float,
"auto_renew": bool,
"bank_account_authorized_at": datetime,
"billing_info_id": str,
"canceled_at": datetime,
"collection_method": str,
"coupon_redemptions": ["CouponRedemptionMini"],
"created_at": datetime,
"currency": str,
"current_period_ends_at": datetime,
"current_period_started_at": datetime,
"current_term_ends_at": datetime,
"current_term_started_at": datetime,
"custom_fields": ["CustomField"],
"customer_notes": str,
"expiration_reason": str,
"expires_at": datetime,
"gateway_code": str,
"id": str,
"net_terms": int,
"object": str,
"paused_at": datetime,
"pending_change": "SubscriptionChange",
"plan": "PlanMini",
"po_number": str,
"quantity": int,
"remaining_billing_cycles": int,
"remaining_pause_cycles": int,
"renewal_billing_cycles": int,
"revenue_schedule_type": str,
"shipping": "SubscriptionShipping",
"state": str,
"subtotal": float,
"tax": float,
"tax_info": "TaxInfo",
"terms_and_conditions": str,
"total": float,
"total_billing_cycles": int,
"trial_ends_at": datetime,
"trial_started_at": datetime,
"unit_amount": float,
"updated_at": datetime,
"uuid": str,
}
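# Illustrative sketch (not part of the generated client): the
# `total_billing_cycles` docstring above says that when
# `remaining_billing_cycles` reaches 0 the subscription either renews
# (`auto_renew=true`) or expires. A tiny helper encoding that rule for a
# Subscription-shaped dict:
def end_of_term_outcome(subscription):
    """Return "continue", "renew", or "expire" based on the docstring rule."""
    if (subscription.get("remaining_billing_cycles") or 0) > 0:
        return "continue"
    return "renew" if subscription.get("auto_renew") else "expire"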
class SubscriptionShipping(Resource):
"""
Attributes
----------
address : ShippingAddress
amount : float
Subscription's shipping cost
method : ShippingMethodMini
object : str
Object type
"""
schema = {
"address": "ShippingAddress",
"amount": float,
"method": "ShippingMethodMini",
"object": str,
}
class ShippingMethodMini(Resource):
"""
Attributes
----------
code : str
The internal name used to identify the shipping method.
id : str
Shipping Method ID
name : str
The name of the shipping method displayed to customers.
object : str
Object type
"""
schema = {"code": str, "id": str, "name": str, "object": str}
class CouponRedemptionMini(Resource):
"""
Attributes
----------
coupon : CouponMini
created_at : datetime
Created at
discounted : float
The amount that was discounted upon the application of the coupon, formatted with the currency.
id : str
Coupon Redemption ID
object : str
Will always be `coupon`.
state : str
Coupon Redemption state
"""
schema = {
"coupon": "CouponMini",
"created_at": datetime,
"discounted": float,
"id": str,
"object": str,
"state": str,
}
class CouponMini(Resource):
"""
Attributes
----------
code : str
The code the customer enters to redeem the coupon.
coupon_type : str
Whether the coupon is "single_code" or "bulk". Bulk coupons will require a `unique_code_template` and will generate unique codes through the `/generate` endpoint.
discount : CouponDiscount
Details of the discount a coupon applies. Will contain a `type`
property and one of the following properties: `percent`, `fixed`, `trial`.
expired_at : datetime
The date and time the coupon was expired early or reached its `max_redemptions`.
id : str
Coupon ID
name : str
The internal name for the coupon.
object : str
Object type
state : str
Indicates if the coupon is redeemable, and if it is not, why.
"""
schema = {
"code": str,
"coupon_type": str,
"discount": "CouponDiscount",
"expired_at": datetime,
"id": str,
"name": str,
"object": str,
"state": str,
}
class SubscriptionChange(Resource):
"""
Attributes
----------
activate_at : datetime
Activated at
activated : bool
Returns `true` if the subscription change is activated.
add_ons : :obj:`list` of :obj:`SubscriptionAddOn`
These add-ons will be used when the subscription renews.
billing_info : SubscriptionChangeBillingInfo
Accept nested attributes for three_d_secure_action_result_token_id
created_at : datetime
Created at
custom_fields : :obj:`list` of :obj:`CustomField`
The custom fields will only be altered when they are included in a request. Sending an empty array will not remove any existing values. To remove a field send the name with a null or empty value.
deleted_at : datetime
Deleted at
id : str
The ID of the Subscription Change.
invoice_collection : InvoiceCollection
Invoice Collection
object : str
Object type
plan : PlanMini
Just the important parts.
quantity : int
Subscription quantity
revenue_schedule_type : str
Revenue schedule type
shipping : SubscriptionShipping
Subscription shipping details
subscription_id : str
The ID of the subscription that is going to be changed.
tax_inclusive : bool
Determines whether or not tax is included in the unit amount. The Tax Inclusive Pricing feature (separate from the Mixed Tax Pricing feature) must be enabled to use this flag.
unit_amount : float
Unit amount
updated_at : datetime
Updated at
"""
schema = {
"activate_at": datetime,
"activated": bool,
"add_ons": ["SubscriptionAddOn"],
"billing_info": "SubscriptionChangeBillingInfo",
"created_at": datetime,
"custom_fields": ["CustomField"],
"deleted_at": datetime,
"id": str,
"invoice_collection": "InvoiceCollection",
"object": str,
"plan": "PlanMini",
"quantity": int,
"revenue_schedule_type": str,
"shipping": "SubscriptionShipping",
"subscription_id": str,
"tax_inclusive": bool,
"unit_amount": float,
"updated_at": datetime,
}
class SubscriptionAddOn(Resource):
"""
Attributes
----------
add_on : AddOnMini
Just the important parts.
add_on_source : str
Used to determine where the associated add-on data is pulled from. If this value is set to
`plan_add_on` or left blank, then add-on data will be pulled from the plan's add-ons. If the associated
`plan` has `allow_any_item_on_subscriptions` set to `true` and this field is set to `item`, then
the associated add-on data will be pulled from the site's item catalog.
created_at : datetime
Created at
expired_at : datetime
Expired at
id : str
Subscription Add-on ID
object : str
Object type
percentage_tiers : :obj:`list` of :obj:`SubscriptionAddOnPercentageTier`
If percentage tiers are provided in the request, all existing percentage tiers on the Subscription Add-on will be
removed and replaced by the percentage tiers in the request. Use only if add_on.tier_type is tiered or volume and
add_on.usage_type is percentage
quantity : int
Add-on quantity
revenue_schedule_type : str
Revenue schedule type
subscription_id : str
Subscription ID
tier_type : str
The pricing model for the add-on. For more information,
[click here](https://docs.recurly.com/docs/billing-models#section-quantity-based). See our
[Guide](https://developers.recurly.com/guides/item-addon-guide.html) for an overview of how
to configure quantity-based pricing models.
tiers : :obj:`list` of :obj:`SubscriptionAddOnTier`
If tiers are provided in the request, all existing tiers on the Subscription Add-on will be
removed and replaced by the tiers in the request. If add_on.tier_type is tiered or volume and
add_on.usage_type is percentage use percentage_tiers instead.
unit_amount : float
Supports up to 2 decimal places.
unit_amount_decimal : str
Supports up to 9 decimal places.
updated_at : datetime
Updated at
usage_percentage : float
The percentage taken of the monetary amount of usage tracked. This can be up to 4 decimal places. A value between 0.0 and 100.0. Required if add_on_type is usage and usage_type is percentage.
"""
schema = {
"add_on": "AddOnMini",
"add_on_source": str,
"created_at": datetime,
"expired_at": datetime,
"id": str,
"object": str,
"percentage_tiers": ["SubscriptionAddOnPercentageTier"],
"quantity": int,
"revenue_schedule_type": str,
"subscription_id": str,
"tier_type": str,
"tiers": ["SubscriptionAddOnTier"],
"unit_amount": float,
"unit_amount_decimal": str,
"updated_at": datetime,
"usage_percentage": float,
}
class AddOnMini(Resource):
"""
Attributes
----------
accounting_code : str
Accounting code for invoice line items for this add-on. If no value is provided, it defaults to add-on's code.
add_on_type : str
Whether the add-on type is fixed, or usage-based.
code : str
The unique identifier for the add-on within its plan.
external_sku : str
Optional, stock keeping unit to link the item to other inventory systems.
id : str
Add-on ID
item_id : str
Item ID
measured_unit_id : str
System-generated unique identifier for a measured unit associated with the add-on.
name : str
Describes your add-on and will appear in subscribers' invoices.
object : str
Object type
usage_percentage : float
The percentage taken of the monetary amount of usage tracked. This can be up to 4 decimal places. A value between 0.0 and 100.0.
usage_type : str
Type of usage, returns usage type if `add_on_type` is `usage`.
"""
schema = {
"accounting_code": str,
"add_on_type": str,
"code": str,
"external_sku": str,
"id": str,
"item_id": str,
"measured_unit_id": str,
"name": str,
"object": str,
"usage_percentage": float,
"usage_type": str,
}
class SubscriptionAddOnTier(Resource):
"""
Attributes
----------
ending_quantity : int
Ending quantity
unit_amount : float
Allows up to 2 decimal places. Optionally, override the tiers' default unit amount. If add-on's `add_on_type` is `usage` and `usage_type` is `percentage`, cannot be provided.
unit_amount_decimal : str
Allows up to 9 decimal places. Optionally, override tiers' default unit amount.
If `unit_amount_decimal` is provided, `unit_amount` cannot be provided.
If add-on's `add_on_type` is `usage` and `usage_type` is `percentage`, cannot be provided.
usage_percentage : str
(deprecated) -- Use the percentage_tiers object instead.
"""
schema = {
"ending_quantity": int,
"unit_amount": float,
"unit_amount_decimal": str,
"usage_percentage": str,
}
class SubscriptionAddOnPercentageTier(Resource):
"""
Attributes
----------
ending_amount : float
Ending amount
usage_percentage : str
The percentage taken of the monetary amount of usage tracked.
This can be up to 4 decimal places represented as a string. A value between
0.0 and 100.0.
"""
schema = {"ending_amount": float, "usage_percentage": str}
class SubscriptionChangeBillingInfo(Resource):
"""
Attributes
----------
three_d_secure_action_result_token_id : str
A token generated by Recurly.js after completing a 3-D Secure device fingerprinting or authentication challenge.
"""
schema = {"three_d_secure_action_result_token_id": str}
class UniqueCouponCodeParams(Resource):
"""
Attributes
----------
begin_time : datetime
The date-time to be included when listing UniqueCouponCodes
limit : int
The number of UniqueCouponCodes that will be generated
order : str
Sort order to list newly generated UniqueCouponCodes (should always be `asc`)
sort : str
Sort field to list newly generated UniqueCouponCodes (should always be `created_at`)
"""
schema = {"begin_time": datetime, "limit": int, "order": str, "sort": str}
class UniqueCouponCode(Resource):
"""
Attributes
----------
bulk_coupon_code : str
The Coupon code of the parent Bulk Coupon
bulk_coupon_id : str
The Coupon ID of the parent Bulk Coupon
code : str
The code the customer enters to redeem the coupon.
created_at : datetime
Created at
expired_at : datetime
The date and time the coupon was expired early or reached its `max_redemptions`.
id : str
Unique Coupon Code ID
object : str
Object type
redeemed_at : datetime
The date and time the unique coupon code was redeemed.
state : str
Indicates if the unique coupon code is redeemable or why not.
updated_at : datetime
Updated at
"""
schema = {
"bulk_coupon_code": str,
"bulk_coupon_id": str,
"code": str,
"created_at": datetime,
"expired_at": datetime,
"id": str,
"object": str,
"redeemed_at": datetime,
"state": str,
"updated_at": datetime,
}
class CustomFieldDefinition(Resource):
"""
Attributes
----------
created_at : datetime
Created at
deleted_at : datetime
Definitions are initially soft deleted, and once all the values are removed from the accounts or subscriptions, will be hard deleted and no longer visible.
display_name : str
Used to label the field when viewing and editing the field in Recurly's admin UI.
id : str
Custom field definition ID
name : str
Used by the API to identify the field for reading and writing. The name can only be used once per Recurly object type.
object : str
Object type
related_type : str
Related Recurly object type
tooltip : str
Displayed as a tooltip when editing the field in the Recurly admin UI.
updated_at : datetime
Last updated at
user_access : str
The access control applied inside Recurly's admin UI:
- `api_only` - No one will be able to view or edit this field's data via the admin UI.
- `read_only` - Users with the Customers role will be able to view this field's data via the admin UI, but
editing will only be available via the API.
- `write` - Users with the Customers role will be able to view and edit this field's data via the admin UI.
"""
schema = {
"created_at": datetime,
"deleted_at": datetime,
"display_name": str,
"id": str,
"name": str,
"object": str,
"related_type": str,
"tooltip": str,
"updated_at": datetime,
"user_access": str,
}
class Item(Resource):
"""
Attributes
----------
accounting_code : str
Accounting code for invoice line items.
avalara_service_type : int
Used by Avalara for Communications taxes. The transaction type in combination with the service type describe how the item is taxed. Refer to [the documentation](https://help.avalara.com/AvaTax_for_Communications/Tax_Calculation/AvaTax_for_Communications_Tax_Engine/Mapping_Resources/TM_00115_AFC_Modules_Corresponding_Transaction_Types) for more available t/s types.
avalara_transaction_type : int
Used by Avalara for Communications taxes. The transaction type in combination with the service type describe how the item is taxed. Refer to [the documentation](https://help.avalara.com/AvaTax_for_Communications/Tax_Calculation/AvaTax_for_Communications_Tax_Engine/Mapping_Resources/TM_00115_AFC_Modules_Corresponding_Transaction_Types) for more available t/s types.
code : str
Unique code to identify the item.
created_at : datetime
Created at
currencies : :obj:`list` of :obj:`Pricing`
Item Pricing
custom_fields : :obj:`list` of :obj:`CustomField`
The custom fields will only be altered when they are included in a request. Sending an empty array will not remove any existing values. To remove a field send the name with a null or empty value.
deleted_at : datetime
Deleted at
description : str
Optional, description.
external_sku : str
Optional, stock keeping unit to link the item to other inventory systems.
id : str
Item ID
name : str
This name describes your item and will appear on the invoice when it's purchased on a one time basis.
object : str
Object type
revenue_schedule_type : str
Revenue schedule type
state : str
The current state of the item.
tax_code : str
Used by Avalara, Vertex, and Recurly’s EU VAT tax feature. The tax code values are specific to each tax system. If you are using Recurly’s EU VAT feature you can use `unknown`, `physical`, or `digital`.
tax_exempt : bool
`true` exempts tax on the item, `false` applies tax on the item.
updated_at : datetime
Last updated at
"""
schema = {
"accounting_code": str,
"avalara_service_type": int,
"avalara_transaction_type": int,
"code": str,
"created_at": datetime,
"currencies": ["Pricing"],
"custom_fields": ["CustomField"],
"deleted_at": datetime,
"description": str,
"external_sku": str,
"id": str,
"name": str,
"object": str,
"revenue_schedule_type": str,
"state": str,
"tax_code": str,
"tax_exempt": bool,
"updated_at": datetime,
}
class Pricing(Resource):
"""
Attributes
----------
currency : str
3-letter ISO 4217 currency code.
tax_inclusive : bool
Determines whether or not tax is included in the unit amount. The Tax Inclusive Pricing feature (separate from the Mixed Tax Pricing feature) must be enabled to use this flag.
unit_amount : float
Unit price
"""
schema = {"currency": str, "tax_inclusive": bool, "unit_amount": float}
class MeasuredUnit(Resource):
"""
Attributes
----------
created_at : datetime
Created at
deleted_at : datetime
Deleted at
description : str
Optional internal description.
display_name : str
Display name for the measured unit. Must be alphanumeric and may also contain spaces and underscores.
id : str
Measured unit ID
name : str
Unique internal name of the measured unit on your site.
object : str
Object type
state : str
The current state of the measured unit.
updated_at : datetime
Last updated at
"""
schema = {
"created_at": datetime,
"deleted_at": datetime,
"description": str,
"display_name": str,
"id": str,
"name": str,
"object": str,
"state": str,
"updated_at": datetime,
}
class BinaryFile(Resource):
"""
Attributes
----------
data : str
"""
schema = {"data": str}
class Plan(Resource):
"""
Attributes
----------
accounting_code : str
Accounting code for invoice line items for the plan. If no value is provided, it defaults to plan's code.
allow_any_item_on_subscriptions : bool
Used to determine whether items can be assigned as add-ons to individual subscriptions.
If `true`, items can be assigned as add-ons to individual subscription add-ons.
If `false`, only plan add-ons can be used.
auto_renew : bool
Subscriptions will automatically inherit this value once they are active. If `auto_renew` is `true`, then a subscription will automatically renew its term at renewal. If `auto_renew` is `false`, then a subscription will expire at the end of its term. `auto_renew` can be overridden on the subscription record itself.
avalara_service_type : int
Used by Avalara for Communications taxes. The transaction type in combination with the service type describe how the plan is taxed. Refer to [the documentation](https://help.avalara.com/AvaTax_for_Communications/Tax_Calculation/AvaTax_for_Communications_Tax_Engine/Mapping_Resources/TM_00115_AFC_Modules_Corresponding_Transaction_Types) for more available t/s types.
avalara_transaction_type : int
Used by Avalara for Communications taxes. The transaction type in combination with the service type describe how the plan is taxed. Refer to [the documentation](https://help.avalara.com/AvaTax_for_Communications/Tax_Calculation/AvaTax_for_Communications_Tax_Engine/Mapping_Resources/TM_00115_AFC_Modules_Corresponding_Transaction_Types) for more available t/s types.
code : str
Unique code to identify the plan. This is used in Hosted Payment Page URLs and in the invoice exports.
created_at : datetime
Created at
currencies : :obj:`list` of :obj:`PlanPricing`
Pricing
deleted_at : datetime
Deleted at
description : str
Optional description, not displayed.
dunning_campaign_id : str
Unique ID to identify a dunning campaign. Used to specify if a non-default dunning campaign should be assigned to this plan. For sites without multiple dunning campaigns enabled, the default dunning campaign will always be used.
hosted_pages : PlanHostedPages
Hosted pages settings
id : str
Plan ID
interval_length : int
Length of the plan's billing interval in `interval_unit`.
interval_unit : str
Unit for the plan's billing interval.
name : str
This name describes your plan and will appear on the Hosted Payment Page and the subscriber's invoice.
object : str
Object type
revenue_schedule_type : str
Revenue schedule type
setup_fee_accounting_code : str
Accounting code for invoice line items for the plan's setup fee. If no value is provided, it defaults to plan's accounting code.
setup_fee_revenue_schedule_type : str
Setup fee revenue schedule type
state : str
The current state of the plan.
tax_code : str
Used by Avalara, Vertex, and Recurly’s EU VAT tax feature. The tax code values are specific to each tax system. If you are using Recurly’s EU VAT feature you can use `unknown`, `physical`, or `digital`.
tax_exempt : bool
`true` exempts tax on the plan, `false` applies tax on the plan.
total_billing_cycles : int
Automatically terminate subscriptions after a defined number of billing cycles. Number of billing cycles before the plan automatically stops renewing, defaults to `null` for continuous, automatic renewal.
trial_length : int
Length of plan's trial period in `trial_units`. `0` means `no trial`.
trial_requires_billing_info : bool
Allow free trial subscriptions to be created without billing info. Should not be used if billing info is needed for initial invoice due to existing uninvoiced charges or setup fee.
trial_unit : str
Units for the plan's trial period.
updated_at : datetime
Last updated at
"""
schema = {
"accounting_code": str,
"allow_any_item_on_subscriptions": bool,
"auto_renew": bool,
"avalara_service_type": int,
"avalara_transaction_type": int,
"code": str,
"created_at": datetime,
"currencies": ["PlanPricing"],
"deleted_at": datetime,
"description": str,
"dunning_campaign_id": str,
"hosted_pages": "PlanHostedPages",
"id": str,
"interval_length": int,
"interval_unit": str,
"name": str,
"object": str,
"revenue_schedule_type": str,
"setup_fee_accounting_code": str,
"setup_fee_revenue_schedule_type": str,
"state": str,
"tax_code": str,
"tax_exempt": bool,
"total_billing_cycles": int,
"trial_length": int,
"trial_requires_billing_info": bool,
"trial_unit": str,
"updated_at": datetime,
}
class PlanPricing(Resource):
"""
Attributes
----------
currency : str
3-letter ISO 4217 currency code.
setup_fee : float
Amount of one-time setup fee automatically charged at the beginning of a subscription billing cycle. For subscription plans with a trial, the setup fee will be charged at the time of signup. Setup fees do not increase with the quantity of a subscription plan.
tax_inclusive : bool
Determines whether or not tax is included in the unit amount. The Tax Inclusive Pricing feature (separate from the Mixed Tax Pricing feature) must be enabled to use this flag.
unit_amount : float
Unit price
"""
schema = {
"currency": str,
"setup_fee": float,
"tax_inclusive": bool,
"unit_amount": float,
}
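# Illustrative sketch (not part of the generated client): per the PlanPricing
# docstring, the setup fee is charged once per subscription and does not scale
# with quantity, while `unit_amount` does. A rough pre-tax estimate of a first
# charge for a given currency:
def estimate_first_charge(plan_pricing, quantity=1):
    """Estimate setup fee plus recurring charge from a PlanPricing-shaped dict."""
    setup_fee = plan_pricing.get("setup_fee") or 0.0
    unit_amount = plan_pricing.get("unit_amount") or 0.0
    return setup_fee + unit_amount * quantity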
class PlanHostedPages(Resource):
"""
Attributes
----------
bypass_confirmation : bool
If `true`, the customer will be sent directly to your `success_url` after a successful signup, bypassing Recurly's hosted confirmation page.
cancel_url : str
URL to redirect to on canceled signup on the hosted payment pages.
display_quantity : bool
Determines if the quantity field is displayed on the hosted pages for the plan.
success_url : str
URL to redirect to after signup on the hosted payment pages.
"""
schema = {
"bypass_confirmation": bool,
"cancel_url": str,
"display_quantity": bool,
"success_url": str,
}
class AddOn(Resource):
"""
Attributes
----------
accounting_code : str
Accounting code for invoice line items for this add-on. If no value is provided, it defaults to add-on's code.
add_on_type : str
Whether the add-on type is fixed, or usage-based.
avalara_service_type : int
Used by Avalara for Communications taxes. The transaction type in combination with the service type describe how the add-on is taxed. Refer to [the documentation](https://help.avalara.com/AvaTax_for_Communications/Tax_Calculation/AvaTax_for_Communications_Tax_Engine/Mapping_Resources/TM_00115_AFC_Modules_Corresponding_Transaction_Types) for more available t/s types.
avalara_transaction_type : int
Used by Avalara for Communications taxes. The transaction type in combination with the service type describe how the add-on is taxed. Refer to [the documentation](https://help.avalara.com/AvaTax_for_Communications/Tax_Calculation/AvaTax_for_Communications_Tax_Engine/Mapping_Resources/TM_00115_AFC_Modules_Corresponding_Transaction_Types) for more available t/s types.
code : str
The unique identifier for the add-on within its plan.
created_at : datetime
Created at
currencies : :obj:`list` of :obj:`AddOnPricing`
Add-on pricing
default_quantity : int
Default quantity for the hosted pages.
deleted_at : datetime
Deleted at
display_quantity : bool
Determines if the quantity field is displayed on the hosted pages for the add-on.
external_sku : str
Optional, stock keeping unit to link the item to other inventory systems.
id : str
Add-on ID
item : ItemMini
Just the important parts.
measured_unit_id : str
        System-generated unique identifier for a measured unit associated with the add-on.
name : str
Describes your add-on and will appear in subscribers' invoices.
object : str
Object type
optional : bool
Whether the add-on is optional for the customer to include in their purchase on the hosted payment page. If false, the add-on will be included when a subscription is created through the Recurly UI. However, the add-on will not be included when a subscription is created through the API.
percentage_tiers : :obj:`list` of :obj:`PercentageTiersByCurrency`
Percentage Tiers
plan_id : str
Plan ID
revenue_schedule_type : str
When this add-on is invoiced, the line item will use this revenue schedule. If `item_code`/`item_id` is part of the request then `revenue_schedule_type` must be absent in the request as the value will be set from the item.
state : str
Add-ons can be either active or inactive.
tax_code : str
Used by Avalara, Vertex, and Recurly’s EU VAT tax feature. The tax code values are specific to each tax system. If you are using Recurly’s EU VAT feature you can use `unknown`, `physical`, or `digital`.
tier_type : str
The pricing model for the add-on. For more information,
[click here](https://docs.recurly.com/docs/billing-models#section-quantity-based). See our
[Guide](https://developers.recurly.com/guides/item-addon-guide.html) for an overview of how
to configure quantity-based pricing models.
tiers : :obj:`list` of :obj:`Tier`
Tiers
updated_at : datetime
Last updated at
usage_percentage : float
The percentage taken of the monetary amount of usage tracked. This can be up to 4 decimal places. A value between 0.0 and 100.0.
usage_type : str
Type of usage, returns usage type if `add_on_type` is `usage`.
"""
schema = {
"accounting_code": str,
"add_on_type": str,
"avalara_service_type": int,
"avalara_transaction_type": int,
"code": str,
"created_at": datetime,
"currencies": ["AddOnPricing"],
"default_quantity": int,
"deleted_at": datetime,
"display_quantity": bool,
"external_sku": str,
"id": str,
"item": "ItemMini",
"measured_unit_id": str,
"name": str,
"object": str,
"optional": bool,
"percentage_tiers": ["PercentageTiersByCurrency"],
"plan_id": str,
"revenue_schedule_type": str,
"state": str,
"tax_code": str,
"tier_type": str,
"tiers": ["Tier"],
"updated_at": datetime,
"usage_percentage": float,
"usage_type": str,
}
class AddOnPricing(Resource):
"""
Attributes
----------
currency : str
3-letter ISO 4217 currency code.
tax_inclusive : bool
Determines whether or not tax is included in the unit amount. The Tax Inclusive Pricing feature (separate from the Mixed Tax Pricing feature) must be enabled to use this flag.
unit_amount : float
Allows up to 2 decimal places. Required unless `unit_amount_decimal` is provided.
unit_amount_decimal : str
Allows up to 9 decimal places. Only supported when `add_on_type` = `usage`.
If `unit_amount_decimal` is provided, `unit_amount` cannot be provided.
"""
schema = {
"currency": str,
"tax_inclusive": bool,
"unit_amount": float,
"unit_amount_decimal": str,
}
class Tier(Resource):
"""
Attributes
----------
currencies : :obj:`list` of :obj:`TierPricing`
Tier pricing
ending_quantity : int
Ending quantity for the tier. This represents a unit amount for unit-priced add ons.
usage_percentage : str
(deprecated) -- Use the percentage_tiers object instead.
"""
schema = {
"currencies": ["TierPricing"],
"ending_quantity": int,
"usage_percentage": str,
}
class TierPricing(Resource):
"""
Attributes
----------
currency : str
3-letter ISO 4217 currency code.
unit_amount : float
Allows up to 2 decimal places. Required unless `unit_amount_decimal` is provided.
unit_amount_decimal : str
Allows up to 9 decimal places. Only supported when `add_on_type` = `usage`.
If `unit_amount_decimal` is provided, `unit_amount` cannot be provided.
"""
schema = {"currency": str, "unit_amount": float, "unit_amount_decimal": str}
class PercentageTiersByCurrency(Resource):
"""
Attributes
----------
currency : str
3-letter ISO 4217 currency code.
tiers : :obj:`list` of :obj:`PercentageTier`
Tiers
"""
schema = {"currency": str, "tiers": ["PercentageTier"]}
class PercentageTier(Resource):
"""
Attributes
----------
ending_amount : float
Ending amount for the tier. Allows up to 2 decimal places. The last tier ending_amount is null.
usage_percentage : str
Decimal usage percentage.
"""
schema = {"ending_amount": float, "usage_percentage": str}
class ShippingMethod(Resource):
"""
Attributes
----------
accounting_code : str
Accounting code for shipping method.
code : str
        The internal name used to identify the shipping method.
created_at : datetime
Created at
deleted_at : datetime
Deleted at
id : str
Shipping Method ID
name : str
The name of the shipping method displayed to customers.
object : str
Object type
tax_code : str
Used by Avalara, Vertex, and Recurly’s built-in tax feature. The tax
code values are specific to each tax system. If you are using Recurly’s
built-in taxes the values are:
- `FR` – Common Carrier FOB Destination
- `FR022000` – Common Carrier FOB Origin
- `FR020400` – Non Common Carrier FOB Destination
- `FR020500` – Non Common Carrier FOB Origin
- `FR010100` – Delivery by Company Vehicle Before Passage of Title
- `FR010200` – Delivery by Company Vehicle After Passage of Title
- `NT` – Non-Taxable
updated_at : datetime
Last updated at
"""
schema = {
"accounting_code": str,
"code": str,
"created_at": datetime,
"deleted_at": datetime,
"id": str,
"name": str,
"object": str,
"tax_code": str,
"updated_at": datetime,
}
class Usage(Resource):
"""
Attributes
----------
amount : float
The amount of usage. Can be positive, negative, or 0. No decimals allowed, we will strip them. If the usage-based add-on is billed with a percentage, your usage will be a monetary amount you will want to format in cents. (e.g., $5.00 is "500").
billed_at : datetime
When the usage record was billed on an invoice.
created_at : datetime
When the usage record was created in Recurly.
id : str
measured_unit_id : str
The ID of the measured unit associated with the add-on the usage record is for.
merchant_tag : str
Custom field for recording the id in your own system associated with the usage, so you can provide auditable usage displays to your customers using a GET on this endpoint.
object : str
Object type
percentage_tiers : :obj:`list` of :obj:`SubscriptionAddOnPercentageTier`
The percentage tiers of the subscription based on the usage_timestamp. If tier_type = flat, percentage_tiers = []
recording_timestamp : datetime
When the usage was recorded in your system.
tier_type : str
The pricing model for the add-on. For more information,
[click here](https://docs.recurly.com/docs/billing-models#section-quantity-based). See our
[Guide](https://developers.recurly.com/guides/item-addon-guide.html) for an overview of how
to configure quantity-based pricing models.
tiers : :obj:`list` of :obj:`SubscriptionAddOnTier`
The tiers and prices of the subscription based on the usage_timestamp. If tier_type = flat, tiers = []
unit_amount : float
Unit price
unit_amount_decimal : str
Unit price that can optionally support a sub-cent value.
updated_at : datetime
        When the usage record was last updated in Recurly.
usage_percentage : float
The percentage taken of the monetary amount of usage tracked. This can be up to 4 decimal places. A value between 0.0 and 100.0.
usage_timestamp : datetime
When the usage actually happened. This will define the line item dates this usage is billed under and is important for revenue recognition.
usage_type : str
Type of usage, returns usage type if `add_on_type` is `usage`.
"""
schema = {
"amount": float,
"billed_at": datetime,
"created_at": datetime,
"id": str,
"measured_unit_id": str,
"merchant_tag": str,
"object": str,
"percentage_tiers": ["SubscriptionAddOnPercentageTier"],
"recording_timestamp": datetime,
"tier_type": str,
"tiers": ["SubscriptionAddOnTier"],
"unit_amount": float,
"unit_amount_decimal": str,
"updated_at": datetime,
"usage_percentage": float,
"usage_timestamp": datetime,
"usage_type": str,
}
class ExportDates(Resource):
"""
Attributes
----------
dates : :obj:`list` of :obj:`str`
An array of dates that have available exports.
object : str
Object type
"""
schema = {"dates": list, "object": str}
class ExportFiles(Resource):
"""
Attributes
----------
files : :obj:`list` of :obj:`ExportFile`
object : str
Object type
"""
schema = {"files": ["ExportFile"], "object": str}
class ExportFile(Resource):
"""
Attributes
----------
href : str
A presigned link to download the export file.
md5sum : str
MD5 hash of the export file.
name : str
Name of the export file.
"""
schema = {"href": str, "md5sum": str, "name": str}
class DunningCampaign(Resource):
"""
Attributes
----------
code : str
Campaign code.
created_at : datetime
When the current campaign was created in Recurly.
default_campaign : bool
Whether or not this is the default campaign for accounts or plans without an assigned dunning campaign.
deleted_at : datetime
When the current campaign was deleted in Recurly.
description : str
Campaign description.
dunning_cycles : :obj:`list` of :obj:`DunningCycle`
Dunning Cycle settings.
id : str
name : str
Campaign name.
object : str
Object type
updated_at : datetime
When the current campaign was updated in Recurly.
"""
schema = {
"code": str,
"created_at": datetime,
"default_campaign": bool,
"deleted_at": datetime,
"description": str,
"dunning_cycles": ["DunningCycle"],
"id": str,
"name": str,
"object": str,
"updated_at": datetime,
}
class DunningCycle(Resource):
"""
Attributes
----------
applies_to_manual_trial : bool
Whether the dunning settings will be applied to manual trials. Only applies to trial cycles.
created_at : datetime
When the current settings were created in Recurly.
expire_subscription : bool
Whether the subscription(s) should be cancelled at the end of the dunning cycle.
fail_invoice : bool
Whether the invoice should be failed at the end of the dunning cycle.
first_communication_interval : int
The number of days after a transaction failure before the first dunning email is sent.
intervals : :obj:`list` of :obj:`DunningInterval`
Dunning intervals.
send_immediately_on_hard_decline : bool
Whether or not to send an extra email immediately to customers whose initial payment attempt fails with either a hard decline or invalid billing info.
total_dunning_days : int
The number of days between the first dunning email being sent and the end of the dunning cycle.
total_recycling_days : int
The number of days between a transaction failure and the end of the dunning cycle.
type : str
The type of invoice this cycle applies to.
updated_at : datetime
When the current settings were updated in Recurly.
version : int
Current campaign version.
"""
schema = {
"applies_to_manual_trial": bool,
"created_at": datetime,
"expire_subscription": bool,
"fail_invoice": bool,
"first_communication_interval": int,
"intervals": ["DunningInterval"],
"send_immediately_on_hard_decline": bool,
"total_dunning_days": int,
"total_recycling_days": int,
"type": str,
"updated_at": datetime,
"version": int,
}
class DunningInterval(Resource):
"""
Attributes
----------
days : int
Number of days before sending the next email.
email_template : str
Email template being used.
"""
schema = {"days": int, "email_template": str}
class DunningCampaignsBulkUpdateResponse(Resource):
"""
Attributes
----------
object : str
Object type
plans : :obj:`list` of :obj:`Plan`
An array containing all of the `Plan` resources that have been updated.
"""
schema = {"object": str, "plans": ["Plan"]}
class InvoiceTemplate(Resource):
"""
Attributes
----------
code : str
Invoice template code.
created_at : datetime
When the invoice template was created in Recurly.
description : str
Invoice template description.
id : str
name : str
Invoice template name.
updated_at : datetime
When the invoice template was updated in Recurly.
"""
schema = {
"code": str,
"created_at": datetime,
"description": str,
"id": str,
"name": str,
"updated_at": datetime,
}
|
the-stack_0_8400 | # Copyright 2011 Nicolas Maupu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Package acm.functional
class curry(object):
'''Class to currify a function'''
    def __init__(*args, **kw):
        # 'self' is taken positionally so the curried callable can forward arbitrary
        # keyword arguments (even ones named 'self' or 'fn') without a name clash.
        self = args[0]
        self.fn, self.args, self.kw = (args[1], args[2:], kw)
def __call__(self, *args, **kw):
if kw and self.kw:
d = self.kw.copy()
d.update(kw)
else:
d = kw or self.kw
return self.fn(*(self.args + args), **d)
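# Minimal usage sketch (added for illustration; not part of the original package).
# The operator module is only used for this demo.
if __name__ == '__main__':
    import operator
    double = curry(operator.mul, 2)   # pre-bind the first argument
    print(double(21))                 # -> 42
    greet = curry('{} {}!'.format, 'Hello')
    print(greet('world'))             # -> Hello world!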
|
the-stack_0_8401 | #!/usr/bin/env python
from collections import defaultdict, namedtuple
import sys
import re
import os
import random
from itertools import chain
import extractor_util as util
import data_util as dutil
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('pa_abbrevs', 'text[]'),
('pheno_entities', 'text[]'),
('pa_section_ids', 'text[]'),
('pa_sent_ids', 'int[]')])
ExpandedRow = namedtuple('ExpandedRow', [
'doc_id',
'section_id',
'sent_id',
'words',
'lemmas',
'poses',
'ners',
'pa_abbrev',
'pheno_entity',
'pa_section_id',
'pa_sent_id'])
# This defines the output Mention object
Mention = namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'wordidxs',
'mention_id',
'mention_supertype',
'mention_subtype',
'entity',
'words',
'is_correct'])
def expand_array_rows(array_row):
for i, pa_abbrev in enumerate(array_row.pa_abbrevs):
row = ExpandedRow(doc_id = array_row.doc_id,
section_id = array_row.section_id,
sent_id = array_row.sent_id,
words = array_row.words,
lemmas = array_row.lemmas,
poses = array_row.poses,
ners = array_row.ners,
pa_abbrev = pa_abbrev,
pheno_entity = array_row.pheno_entities[i],
pa_section_id = array_row.pa_section_ids[i],
pa_sent_id = array_row.pa_sent_ids[i])
yield row
### CANDIDATE EXTRACTION ###
SR = config.PHENO_ACRONYMS['SR']
def extract_candidate_mentions(row):
"""Extracts candidate phenotype mentions from an input row object"""
mentions = []
for i, word in enumerate(row.words):
if word == row.pa_abbrev:
mention_id = '%s_%s_%d_%d' % \
(row.doc_id, \
row.section_id, \
row.sent_id, \
i)
subtype = '%s_%s_%d_%s' % (row.doc_id, row.pa_section_id, row.pa_sent_id, row.pa_abbrev)
m = Mention(None, row.doc_id, row.section_id, row.sent_id,
[i], mention_id, "ABBREV", subtype, row.pheno_entity,
[word], True)
mentions.append(m)
return mentions
def generate_rand_negatives(row, pos, neg):
mentions = []
for i, word in enumerate(row.words):
if neg >= pos:
break
if word == row.pa_abbrev:
continue
if word.isupper() and word.strip() != '-LRB-' and word.strip() != '-RRB-':
mention_id = '%s_%s_%d_%d' % \
(row.doc_id, \
row.section_id, \
row.sent_id, \
i)
subtype = '%s_%s_%d_%s' % (row.doc_id, row.pa_section_id, row.pa_sent_id, row.pa_abbrev)
m = Mention(None, row.doc_id, row.section_id, row.sent_id,
[i], mention_id, 'ABBREV_RAND_NEG', subtype, None, [word], False)
      mentions.append(m)
      neg += 1
return mentions
if __name__ == '__main__':
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
pos = 0
neg = 0
# Read TSV data in as Row objects
for line in sys.stdin:
array_row = parser.parse_tsv_row(line)
abbrevs = set()
for row in expand_array_rows(array_row):
if row.pa_abbrev in abbrevs:
continue
abbrevs.add(row.pa_abbrev)
# Skip row if sentence doesn't contain a verb, contains URL, etc.
if util.skip_row(row):
continue
# find candidate mentions & supervise
mentions = extract_candidate_mentions(row)
pos += len(mentions)
if SR.get('rand-negs'):
negs = generate_rand_negatives(row, pos, neg)
neg += len(negs)
mentions.extend(negs)
# print output
for mention in mentions:
util.print_tsv_output(mention)
#!/usr/bin/env python
|
the-stack_0_8402 | import tempfile
import mmcv
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import TDANNet
from mmedit.models.losses import MSELoss
def test_tdan_model():
model_cfg = dict(
type='TDAN',
generator=dict(
type='TDANNet',
in_channels=3,
mid_channels=64,
out_channels=3,
num_blocks_before_align=5,
num_blocks_after_align=10),
pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
lq_pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
)
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'TDAN'
assert isinstance(restorer.generator, TDANNet)
assert isinstance(restorer.pixel_loss, MSELoss)
# prepare data
inputs = torch.rand(1, 5, 3, 8, 8)
targets = torch.rand(1, 3, 32, 32)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'generator').parameters()))
}
# train_step
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 32, 32)
# test forward_dummy
with torch.no_grad():
output = restorer.forward_dummy(data_batch['lq'])
assert isinstance(output, tuple)
assert torch.is_tensor(output[0])
assert output[0].size() == (1, 3, 32, 32)
assert torch.is_tensor(output[1])
assert output[1].size() == (1, 5, 3, 8, 8)
# forward_test
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 32, 32)
with torch.no_grad():
outputs = restorer(inputs.cuda(), test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 32, 32)
# test with metric and save image
if torch.cuda.is_available():
train_cfg = mmcv.ConfigDict(tsa_iter=1)
test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
test_cfg = mmcv.Config(test_cfg)
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'meta': [{
'gt_path': 'fake_path/fake_name.png',
'key': '000/00000000'
}]
}
restorer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
with pytest.raises(AssertionError):
# evaluation with metrics must have gt images
restorer(lq=inputs.cuda(), test_mode=True)
with tempfile.TemporaryDirectory() as tmpdir:
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=None)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=100)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
with pytest.raises(ValueError):
# iteration should be number or None
restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration='100')
|
the-stack_0_8403 | #!/usr/bin/env python3
import torch
from .kernel import Kernel
from ..lazy import delazify
from ..constraints import Positive
class ScaleKernel(Kernel):
r"""
Decorates an existing kernel object with an output scale, i.e.
.. math::
\begin{equation*}
K_{\text{scaled}} = \theta_\text{scale} K_{\text{orig}}
\end{equation*}
where :math:`\theta_\text{scale}` is the `outputscale` parameter.
In batch-mode (i.e. when :math:`x_1` and :math:`x_2` are batches of input matrices), each
batch of data can have its own `outputscale` parameter by setting the `batch_shape`
keyword argument to the appropriate number of batches.
.. note::
The :attr:`outputscale` parameter is parameterized on a log scale to constrain it to be positive.
You can set a prior on this parameter using the :attr:`outputscale_prior` argument.
Args:
:attr:`base_kernel` (Kernel):
The base kernel to be scaled.
:attr:`batch_shape` (int, optional):
Set this if you want a separate outputscale for each batch of input data. It should be `b`
if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`
:attr:`outputscale_prior` (Prior, optional): Set this if you want to apply a prior to the outputscale
parameter. Default: `None`
:attr:`outputscale_constraint` (Constraint, optional): Set this if you want to apply a constraint to the
outputscale parameter. Default: `Positive`.
Attributes:
:attr:`base_kernel` (Kernel):
The kernel module to be scaled.
:attr:`outputscale` (Tensor):
The outputscale parameter. Size/shape of parameter depends on the :attr:`batch_shape` arguments.
Example:
>>> x = torch.randn(10, 5)
>>> base_covar_module = gpytorch.kernels.RBFKernel()
>>> scaled_covar_module = gpytorch.kernels.ScaleKernel(base_covar_module)
>>> covar = scaled_covar_module(x) # Output: LazyTensor of size (10 x 10)
"""
def __init__(self, base_kernel, outputscale_prior=None, outputscale_constraint=None, **kwargs):
super(ScaleKernel, self).__init__(has_lengthscale=False, **kwargs)
if outputscale_constraint is None:
outputscale_constraint = Positive()
self.base_kernel = base_kernel
outputscale = torch.zeros(*self.batch_shape) if len(self.batch_shape) else torch.tensor(0.)
self.register_parameter(name="raw_outputscale", parameter=torch.nn.Parameter(outputscale))
if outputscale_prior is not None:
self.register_prior(
"outputscale_prior", outputscale_prior, lambda: self.outputscale, lambda v: self._set_outputscale(v)
)
self.register_constraint("raw_outputscale", outputscale_constraint)
@property
def outputscale(self):
return self.raw_outputscale_constraint.transform(self.raw_outputscale)
@outputscale.setter
def outputscale(self, value):
self._set_outputscale(value)
def _set_outputscale(self, value):
if not torch.is_tensor(value):
value = torch.as_tensor(value).to(self.raw_outputscale)
self.initialize(raw_outputscale=self.raw_outputscale_constraint.inverse_transform(value))
def forward(self, x1, x2, last_dim_is_batch=False, diag=False, **params):
orig_output = self.base_kernel.forward(x1, x2, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)
outputscales = self.outputscale
if last_dim_is_batch:
outputscales = outputscales.unsqueeze(-1)
if diag:
outputscales = outputscales.unsqueeze(-1)
return delazify(orig_output) * outputscales
else:
outputscales = outputscales.view(*outputscales.shape, 1, 1)
return orig_output.mul(outputscales)
def num_outputs_per_input(self, x1, x2):
return self.base_kernel.num_outputs_per_input(x1, x2)
|
the-stack_0_8404 | """
Run FragileX data synapse detections
"""
import os
import sys
import pandas as pd
from at_synapse_detection import dataAccess as da
from at_synapse_detection import SynapseDetection as syn
from at_synapse_detection import antibodyAnalysis as aa
from at_synapse_detection import SynapseAnalysis as sa
import socket
import multiprocessing as mp
import copy
import numpy as np
def run_list_of_queries(mouse_number, mouse_project_str, sheet_name):
"""
run queries in a parallel manner
Parameters
-----------------
mouse_number : int
mouse_project_str : str
sheet_name : str
"""
output_foldername = 'results_' + sheet_name
query_fn = 'queries/' + mouse_project_str + '_queries.json'
data_location = '/Users/anish/Documents/yi_mice/' + \
str(mouse_number) + 'ss_stacks/'
hostname = socket.gethostname()
if hostname == 'Galicia':
data_location = '/data5TB/yi_mice/' + str(mouse_number) + 'ss_stacks'
dapi_mask_str_base = '/data5TB/yi_mice/dapi-masks/' + \
str(mouse_number) + 'ss_stacks'
print('Query Filename: ', query_fn)
print('Data Location: ', data_location)
print('OutputFoldername: ', output_foldername)
print('Sheetname: ', sheet_name)
listOfQueries = syn.loadQueriesJSON(query_fn)
resolution = {'res_xy_nm': 100, 'res_z_nm': 70}
region_name_base = 'F00'
thresh = 0.9
result_list = []
num_workers = mp.cpu_count() - 1
print(num_workers)
pool = mp.Pool(num_workers)
atet_inputs_list = []
mask_location_str = -1
queryID = 0
foldernames = []
for region_num in range(0, 4):
region_name = region_name_base + str(region_num)
data_region_location = os.path.join(data_location, region_name)
dapi_mask_str = os.path.join(dapi_mask_str_base, region_name)
for nQuery, query in enumerate(listOfQueries):
foldername = region_name + '-Q' + str(nQuery)
foldernames.append(foldername)
print(foldername)
mask_location_str = -1
#dapi_mask_str = -1
atet_input = {'query': query, 'queryID': queryID, 'nQuery': nQuery, 'resolution': resolution,
'data_region_location': data_region_location, 'data_location': data_location,
'output_foldername': output_foldername, 'region_name': region_name,
'mask_str': mask_location_str, 'dapi_mask_str': dapi_mask_str, 'mouse_number': mouse_number}
atet_inputs_list.append(atet_input)
queryID = queryID + 1
# Run processes
result_list = pool.map(sa.run_synapse_detection, atet_inputs_list)
pool.close()
pool.join()
print('Get process results from the output queue')
sorted_queryresult = sa.organize_result_lists(result_list)
mouse_df = sa.create_synapse_df(sorted_queryresult, foldernames)
print(mouse_df)
fn = sheet_name + '.xlsx'
df_list = [mouse_df]
aa.write_dfs_to_excel(df_list, sheet_name, fn)
def main():
if len(sys.argv) < 4:
print('Run All Combinations')
print(sys.argv)
# mouse_number = 2
# mouse_project_str = '2ss'
# sheet_name = '2ss_fragX'
# python run_fragX.py 4 '4ss_inhibitory' '4ss_inhibitory_fragX'
# run_list_of_queries(
# mouse_number=1, mouse_project_str='1ss_inhibitory', sheet_name='1ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=22, mouse_project_str='22ss_inhibitory', sheet_name='22ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=2, mouse_project_str='2ss_inhibitory', sheet_name='2ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=3, mouse_project_str='3ss_inhibitory', sheet_name='3ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=4, mouse_project_str='4ss_inhibitory', sheet_name='4ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=6, mouse_project_str='6ss_inhibitory', sheet_name='6ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=5, mouse_project_str='5ss_inhibitory', sheet_name='5ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=7, mouse_project_str='7ss_inhibitory', sheet_name='7ss_inhibitory_fragX')
run_list_of_queries(
mouse_number=1, mouse_project_str='1ss', sheet_name='1ss_fragX')
run_list_of_queries(
mouse_number=22, mouse_project_str='22ss', sheet_name='22ss_fragX')
run_list_of_queries(
mouse_number=2, mouse_project_str='2ss', sheet_name='2ss_fragX')
run_list_of_queries(
mouse_number=3, mouse_project_str='3ss', sheet_name='3ss_fragX')
run_list_of_queries(
mouse_number=4, mouse_project_str='4ss', sheet_name='4ss_fragX')
run_list_of_queries(
mouse_number=6, mouse_project_str='6ss', sheet_name='6ss_fragX')
run_list_of_queries(
mouse_number=5, mouse_project_str='5ss', sheet_name='5ss_fragX')
run_list_of_queries(
mouse_number=7, mouse_project_str='7ss', sheet_name='7ss_fragX')
else:
print('we have arguments')
print(sys.argv)
mouse_number = sys.argv[1]
mouse_project_str = sys.argv[2]
sheet_name = sys.argv[3]
run_list_of_queries(mouse_number, mouse_project_str, sheet_name)
if __name__ == '__main__':
main()
|
the-stack_0_8406 | from flask import request, Blueprint, Response
from werkzeug.utils import secure_filename
from models import article
from app import db
from models.article import Article, article_schema, articles_schema
import codecs
articleRoute = Blueprint("articleRoute", __name__)
@articleRoute.route("/article/create", methods=["POST"])
def add_article():
    # Read the form fields into locals instead of mutating the Article class itself,
    # which would overwrite the SQLAlchemy column attributes shared by all requests.
    title = request.form["title"]
    body = request.form["body"]
    author = request.form["author"]
    categoria = request.form["categoria"]
    multimedia = request.files["multimedia"]
    if not multimedia:
        return "No picture uploaded", 400
    filename = secure_filename(multimedia.filename)
    mimetype = multimedia.mimetype
    if not filename:
        return "Bad Upload!!", 400
    new_article = article.Article(
        title,
        body,
        author,
        categoria,
        filename,
        multimedia.read(),
        mimetype
    )
    db.session.add(new_article)
    db.session.commit()
    return article.article_schema.jsonify(new_article)
@articleRoute.route("/article/<int:idArticle>")
def get_article(idArticle):
returnable = db.session.query(Article).get(idArticle)
returnable.data = codecs.encode(returnable.data, 'base64').decode('utf-8')
base64 = f"data:{returnable.mimetype};base64,{returnable.data}"
returnable.data = base64
return article_schema.dump(returnable)
@articleRoute.route("/article/all")
def get_all_article():
returnable = Article.query.all()
for article in returnable:
article.data = codecs.encode(article.data, 'base64').decode('utf-8')
base64 = f"data:{article.mimetype};base64,{article.data}"
article.data = base64
return articles_schema.jsonify(returnable)
@articleRoute.route("/article/delete/<int:idArticle>", methods=["DELETE"])
def delete_article(idArticle):
returnable = Article.query.get_or_404(idArticle)
db.session.delete(returnable)
db.session.commit()
return "", 204
@articleRoute.route("/article/edit/<int:idArticle>", methods=["POST"])
def edit_article(idArticle):
returnable = Article.query.get_or_404(idArticle)
if "title" in request.form:
returnable.title = request.form["title"]
if "body" in request.form:
returnable.body = request.form["body"]
if "author" in request.form:
returnable.author = request.form["author"]
db.session.commit()
return article_schema.dump(returnable), 200
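# Illustrative client-side sketch (added for clarity; not part of the original module).
# It shows the multipart form that /article/create expects; the base URL and file name
# below are hypothetical.
if __name__ == "__main__":
    import requests
    with open("cover.png", "rb") as fh:
        resp = requests.post(
            "http://localhost:5000/article/create",
            data={"title": "Hello", "body": "First post", "author": "me", "categoria": "news"},
            files={"multimedia": ("cover.png", fh, "image/png")},
        )
    print(resp.status_code, resp.json())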
|
the-stack_0_8408 | # Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Example of a PCI alias::
| [pci]
| alias = '{
| "name": "QuickAssist",
| "product_id": "0443",
| "vendor_id": "8086",
| "device_type": "type-PCI",
| "numa_policy": "legacy"
| }'
Aliases with the same name, device_type and numa_policy are ORed::
| [pci]
| alias = '{
| "name": "QuickAssist",
| "product_id": "0442",
| "vendor_id": "8086",
| "device_type": "type-PCI",
| }'
These two aliases define a device request meaning: vendor_id is "8086" and
product_id is "0442" or "0443".
"""
import jsonschema
from oslo_log import log as logging
from oslo_serialization import jsonutils
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova import objects
from nova.objects import fields as obj_fields
from nova.pci import utils
LOG = logging.getLogger(__name__)
PCI_NET_TAG = 'physical_network'
PCI_TRUSTED_TAG = 'trusted'
PCI_DEVICE_TYPE_TAG = 'dev_type'
DEVICE_TYPE_FOR_VNIC_TYPE = {
network_model.VNIC_TYPE_DIRECT_PHYSICAL: obj_fields.PciDeviceType.SRIOV_PF
}
CONF = nova.conf.CONF
_ALIAS_SCHEMA = {
"type": "object",
"additionalProperties": False,
"properties": {
"name": {
"type": "string",
"minLength": 1,
"maxLength": 256,
},
# TODO(stephenfin): This isn't used anywhere outside of tests and
# should probably be removed.
"capability_type": {
"type": "string",
"enum": ['pci'],
},
"product_id": {
"type": "string",
"pattern": utils.PCI_VENDOR_PATTERN,
},
"vendor_id": {
"type": "string",
"pattern": utils.PCI_VENDOR_PATTERN,
},
"device_type": {
"type": "string",
"enum": list(obj_fields.PciDeviceType.ALL),
},
"numa_policy": {
"type": "string",
"enum": list(obj_fields.PCINUMAAffinityPolicy.ALL),
},
},
"required": ["name"],
}
def _get_alias_from_config():
"""Parse and validate PCI aliases from the nova config.
:returns: A dictionary where the keys are device names and the values are
        tuples of form ``(numa_policy, specs)``. ``specs`` is a list of PCI
device specs, while ``numa_policy`` describes the required NUMA
affinity of the device(s).
:raises: exception.PciInvalidAlias if two aliases with the same name have
different device types or different NUMA policies.
"""
jaliases = CONF.pci.alias
aliases = {} # map alias name to alias spec list
try:
for jsonspecs in jaliases:
spec = jsonutils.loads(jsonspecs)
jsonschema.validate(spec, _ALIAS_SCHEMA)
name = spec.pop('name').strip()
numa_policy = spec.pop('numa_policy', None)
if not numa_policy:
numa_policy = obj_fields.PCINUMAAffinityPolicy.LEGACY
dev_type = spec.pop('device_type', None)
if dev_type:
spec['dev_type'] = dev_type
if name not in aliases:
aliases[name] = (numa_policy, [spec])
continue
if aliases[name][0] != numa_policy:
reason = _("NUMA policy mismatch for alias '%s'") % name
raise exception.PciInvalidAlias(reason=reason)
if aliases[name][1][0]['dev_type'] != spec['dev_type']:
reason = _("Device type mismatch for alias '%s'") % name
raise exception.PciInvalidAlias(reason=reason)
aliases[name][1].append(spec)
except exception.PciInvalidAlias:
raise
except jsonschema.exceptions.ValidationError as exc:
raise exception.PciInvalidAlias(reason=exc.message)
except Exception as exc:
raise exception.PciInvalidAlias(reason=str(exc))
return aliases
def _translate_alias_to_requests(alias_spec, affinity_policy=None):
"""Generate complete pci requests from pci aliases in extra_spec."""
pci_aliases = _get_alias_from_config()
pci_requests = []
for name, count in [spec.split(':') for spec in alias_spec.split(',')]:
name = name.strip()
if name not in pci_aliases:
raise exception.PciRequestAliasNotDefined(alias=name)
count = int(count)
numa_policy, spec = pci_aliases[name]
policy = affinity_policy or numa_policy
# NOTE(gibi): InstancePCIRequest has a requester_id field that could
# be filled with the flavor.flavorid but currently there is no special
# handling for InstancePCIRequests created from the flavor. So it is
# left empty.
pci_requests.append(objects.InstancePCIRequest(
count=count,
spec=spec,
alias_name=name,
numa_policy=policy))
return pci_requests
def get_instance_pci_request_from_vif(context, instance, vif):
"""Given an Instance, return the PCI request associated
to the PCI device related to the given VIF (if any) on the
compute node the instance is currently running.
In this method we assume a VIF is associated with a PCI device
if 'pci_slot' attribute exists in the vif 'profile' dict.
:param context: security context
:param instance: instance object
:param vif: network VIF model object
:raises: raises PciRequestFromVIFNotFound if a pci device is requested
but not found on current host
:return: instance's PCIRequest object associated with the given VIF
or None if no PCI device is requested
"""
# Get PCI device address for VIF if exists
vif_pci_dev_addr = vif['profile'].get('pci_slot') \
if vif['profile'] else None
if not vif_pci_dev_addr:
return None
try:
cn_id = objects.ComputeNode.get_by_host_and_nodename(
context,
instance.host,
instance.node).id
except exception.NotFound:
LOG.warning("expected to find compute node with host %s "
"and node %s when getting instance PCI request "
"from VIF", instance.host, instance.node)
return None
# Find PCIDevice associated with vif_pci_dev_addr on the compute node
# the instance is running on.
found_pci_dev = None
for pci_dev in instance.pci_devices:
if (pci_dev.compute_node_id == cn_id and
pci_dev.address == vif_pci_dev_addr):
found_pci_dev = pci_dev
break
if not found_pci_dev:
return None
# Find PCIRequest associated with the given PCIDevice in instance
for pci_req in instance.pci_requests.requests:
if pci_req.request_id == found_pci_dev.request_id:
return pci_req
raise exception.PciRequestFromVIFNotFound(
pci_slot=vif_pci_dev_addr,
node_id=cn_id)
def get_pci_requests_from_flavor(flavor, affinity_policy=None):
"""Validate and return PCI requests.
The ``pci_passthrough:alias`` extra spec describes the flavor's PCI
requests. The extra spec's value is a comma-separated list of format
``alias_name_x:count, alias_name_y:count, ... ``, where ``alias_name`` is
defined in ``pci.alias`` configurations.
The flavor's requirement is translated into a PCI requests list. Each
entry in the list is an instance of nova.objects.InstancePCIRequests with
four keys/attributes.
- 'spec' states the PCI device properties requirement
- 'count' states the number of devices
- 'alias_name' (optional) is the corresponding alias definition name
- 'numa_policy' (optional) states the required NUMA affinity of the devices
For example, assume alias configuration is::
{
'vendor_id':'8086',
'device_id':'1502',
'name':'alias_1'
}
While flavor extra specs includes::
'pci_passthrough:alias': 'alias_1:2'
The returned ``pci_requests`` are::
[{
'count':2,
'specs': [{'vendor_id':'8086', 'device_id':'1502'}],
'alias_name': 'alias_1'
}]
:param flavor: The flavor to be checked
:param affinity_policy: pci numa affinity policy
:returns: A list of PCI requests
:rtype: nova.objects.InstancePCIRequests
:raises: exception.PciRequestAliasNotDefined if an invalid PCI alias is
provided
:raises: exception.PciInvalidAlias if the configuration contains invalid
aliases.
"""
pci_requests = []
if ('extra_specs' in flavor and
'pci_passthrough:alias' in flavor['extra_specs']):
pci_requests = _translate_alias_to_requests(
flavor['extra_specs']['pci_passthrough:alias'],
affinity_policy=affinity_policy)
return objects.InstancePCIRequests(requests=pci_requests)
|
the-stack_0_8410 | # encoding: utf-8
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from odps.df import DataFrame
from odps.ml import merge_data
from odps.ml.preprocess import *
from odps.ml.tests.base import MLTestBase, tn, ci_skip_case
IONOSPHERE_TABLE = tn('pyodps_test_ml_ionosphere')
IONOSPHERE_RANDOM_SAMPLE_TABLE = tn('pyodps_test_ml_iono_rand_sample')
IONOSPHERE_WEIGHTED_SAMPLE_TABLE = tn('pyodps_test_ml_iono_weight_sample')
IONOSPHERE_APPEND_ID_TABLE = tn('pyodps_test_ml_iono_append_id')
IONOSPHERE_MERGED_TABLE = tn('pyodps_test_ml_iono_merged')
IONOSPHERE_PRINCOMP_TABLE = tn('pyodps_test_ml_iono_princomp')
IONOSPHERE_ABNORMAL_TABLE = tn('pyodps_test_ml_iono_abnormal')
USER_ITEM_TABLE = tn('pyodps_test_ml_user_item')
USER_ITEM_UNPIVOT_TABLE = tn('pyodps_test_ml_unpivot_user_item')
class TestPreprocess(MLTestBase):
def setUp(self):
super(TestPreprocess, self).setUp()
self.create_ionosphere(IONOSPHERE_TABLE)
@ci_skip_case
def test_merge(self):
self.delete_table(IONOSPHERE_MERGED_TABLE)
ds = DataFrame(self.odps.get_table(IONOSPHERE_TABLE))
merged_df = merge_data(ds, ds, auto_rename=True)
merged_df.persist(IONOSPHERE_MERGED_TABLE)
assert self.odps.exist_table(IONOSPHERE_MERGED_TABLE)
@ci_skip_case
def test_sample(self):
self.delete_table(IONOSPHERE_WEIGHTED_SAMPLE_TABLE)
df = DataFrame(self.odps.get_table(IONOSPHERE_TABLE)).label_field('class')
df.sample(0.5, replace=True).persist(IONOSPHERE_RANDOM_SAMPLE_TABLE)
assert self.odps.exist_table(IONOSPHERE_RANDOM_SAMPLE_TABLE)
df['a01', 'a02', ((df.a05 + 1) / 2).rename('a05')].sample(0.5, prob_field='a05', replace=True).persist(
IONOSPHERE_WEIGHTED_SAMPLE_TABLE)
assert self.odps.exist_table(IONOSPHERE_WEIGHTED_SAMPLE_TABLE) |
the-stack_0_8412 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
# File: model_desc.py
from collections import namedtuple
import tensorflow as tf
from ..models.regularize import regularize_cost_from_collection
from ..tfutils.tower import get_current_tower_context
from ..tfutils.common import get_tf_version_tuple
from ..utils import logger
from ..utils.argtools import memoized_method
from ..utils.develop import log_deprecated
if get_tf_version_tuple() >= (1, 7):
from tensorflow.python.framework.tensor_spec import TensorSpec
__all__ = ['InputDesc', 'ModelDesc', 'ModelDescBase']
class InputDesc(
namedtuple('InputDescTuple', ['type', 'shape', 'name'])):
"""
Metadata about an input entry point to the graph.
This metadata can be later used to build placeholders or other types of
input source.
"""
def __new__(cls, type, shape, name):
"""
Args:
type (tf.DType):
shape (tuple):
name (str):
"""
shape = tuple(shape) # has to be tuple for "self" to be hashable
assert isinstance(type, tf.DType), type
if any(k in name for k in [':', '/', ' ']):
raise ValueError("Invalid InputDesc name: '{}'".format(name))
self = super(InputDesc, cls).__new__(cls, type, shape, name)
self._cached_placeholder = {}
return self
def _build_placeholder(self):
"""
Build a tf.placeholder from the metadata.
Returns:
tf.Tensor:
"""
with tf.name_scope(None): # clear any name scope it might get called in
ret = tf.placeholder(
self.type, shape=self.shape, name=self.name)
self._register_cached_placeholder(ret)
return ret
# cannot memoize here, because InputDesc is hashed by its fields.
def build_placeholder_reuse(self):
"""
Build a tf.placeholder from the metadata, or return an old one.
Returns:
tf.Tensor:
"""
g = tf.get_default_graph()
if g in self._cached_placeholder:
return self._cached_placeholder[g]
else:
return self._build_placeholder()
def _register_cached_placeholder(self, placeholder):
graph = placeholder.graph
assert graph not in self._cached_placeholder, \
"Placeholder for this InputDesc had been created before! This is a bug."
self._cached_placeholder[graph] = placeholder
@staticmethod
def _from_placeholder(placeholder):
name = placeholder.op.name
if name.endswith('_1') or name.endswith('_2'):
logger.error("Creating InputDesc from a placeholder named {}.".format(name))
logger.error("You might have mistakenly created this placeholder multiple times!")
ret = InputDesc(
placeholder.dtype,
tuple(placeholder.shape.as_list()),
name)
ret._register_cached_placeholder(placeholder)
return ret
@staticmethod
def _from_tensor_spec(spec):
assert spec.name is not None, "TensorSpec should have a name!"
return InputDesc(spec.dtype, tuple(spec.shape.as_list()), spec.name)
class ModelDescBase(object):
"""
Base class for a model description.
"""
@memoized_method
def get_inputs_desc(self):
"""
Returns:
A list of :class:`InputDesc`, which describes the inputs of this model.
The result is cached for each instance of :class:`ModelDescBase`.
"""
try:
ret = self._get_inputs()
log_deprecated(
"ModelDescBase._get_inputs() interface",
"Use inputs() instead!",
"2019-03-30")
return ret
except NotImplementedError:
with tf.Graph().as_default() as G: # create these placeholder in a temporary graph
inputs = self.inputs()
if isinstance(inputs[0], tf.Tensor):
for p in inputs:
assert p.graph == G, "Placeholders returned by inputs() should be created inside inputs()!"
return [InputDesc._from_placeholder(p) for p in inputs]
else:
for p in inputs:
assert isinstance(p, TensorSpec), type(p)
return [InputDesc._from_tensor_spec(p) for p in inputs]
@property
def input_names(self):
"""
Returns:
[str]: the names of all the inputs.
"""
return [k.name for k in self.get_inputs_desc()]
def _get_inputs(self):
raise NotImplementedError()
def inputs(self):
"""
Returns a list of :class:`tf.TensorSpec` or placeholders.
A subclass is expected to implement this method.
If returning placeholders,
the placeholders __have to__ be created inside this method.
Don't return placeholders created in other places.
Also, you should never call this method by yourself.
Returns:
list[tf.placeholder] or list[tf.TensorSpec], to be converted to :class:`InputDesc`.
"""
raise NotImplementedError()
def build_graph(self, *args):
"""
Build the whole symbolic graph.
This is supposed to be part of the "tower function" when used with :class:`TowerTrainer`.
A subclass is expected to implement this method.
Args:
args ([tf.Tensor]): tensors that matches the list of inputs defined by ``inputs()``.
Returns:
In general it returns nothing, but a subclass
may require it to return necessary information to build the trainer.
For example, `SingleCostTrainer` expect this method to return the cost tensor.
"""
assert len(args) == len(self.get_inputs_desc()), \
"Number of inputs passed to the graph != number of inputs defined " \
"in ModelDesc! ({} != {})".format(len(args), len(self.get_inputs_desc()))
log_deprecated(
"ModelDescBase._build_graph() interface",
"Use build_graph() instead!",
"2019-03-30")
return self._build_graph(args)
def _build_graph(self, inputs):
"""
This is an alternative interface which takes a list of tensors, instead of positional arguments.
By default :meth:`build_graph` will call this method.
"""
pass
class ModelDesc(ModelDescBase):
"""
A ModelDesc with **single cost** and **single optimizer**.
It has the following constraints in addition to :class:`ModelDescBase`:
1. :meth:`build_graph(...)` method should return a cost when called under a training context.
The cost will be the final cost to be optimized by the optimizer.
Therefore it should include necessary regularization.
2. Subclass is expected to implement :meth:`optimizer()` method.
"""
def get_cost(self):
"""
Being deprecated.
You're recommended to return a cost tensor in :meth:`build_graph` method directly.
This function takes the `self.cost` tensor defined by :meth:`build_graph`,
and applies the collection
``tf.GraphKeys.REGULARIZATION_LOSSES`` to the cost automatically.
"""
log_deprecated(
"get_cost() and self.cost",
"Return the cost tensor directly in build_graph() instead!",
"2019-03-30")
cost = self._get_cost()
reg_cost = regularize_cost_from_collection()
if reg_cost.op.type != 'Const':
logger.warn("Regularization losses found in collection, and a 'cost' tensor was "
"not returned by `build_graph`. Therefore applying regularization automatically!")
return tf.add(cost, reg_cost, name='cost_with_regularizer')
else:
return cost
def _get_cost(self, *args):
return self.cost
@memoized_method
def get_optimizer(self):
"""
Return the memoized optimizer returned by `optimizer()`.
Users of :class:`ModelDesc` will need to implement `optimizer()`,
which will only be called once per each model.
Returns:
a :class:`tf.train.Optimizer` instance.
"""
try:
ret = self._get_optimizer()
log_deprecated(
"ModelDescBase._get_optimizer() interface",
"Use optimizer() instead!",
"2019-03-30")
return ret
except NotImplementedError:
pass
return self.optimizer()
def _get_optimizer(self):
raise NotImplementedError()
def optimizer(self):
"""
Returns a `tf.train.Optimizer` instance.
A subclass is expected to implement this method.
"""
raise NotImplementedError()
def _build_graph_get_cost(self, *inputs):
"""
Equivalent to `build_graph`.
Used internally by trainers to get the final cost for optimization in a backward-compatible way.
"""
ret = self.build_graph(*inputs)
if not get_current_tower_context().is_training:
return None # this is the tower function, could be called for inference
if ret is not None:
return ret
else: # the old way, for compatibility
return self.get_cost()
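# Minimal illustrative sketch (added for clarity; not part of tensorpack itself) showing
# how the three ModelDesc hooks fit together: inputs() declares the placeholders,
# build_graph() returns the cost tensor, and optimizer() supplies the TF optimizer.
# The 28x28 image / 10-class setup below is an arbitrary assumption.
if __name__ == '__main__':
    class _ExampleModel(ModelDesc):
        def inputs(self):
            return [tf.placeholder(tf.float32, (None, 28, 28), 'input'),
                    tf.placeholder(tf.int32, (None,), 'label')]
        def build_graph(self, image, label):
            logits = tf.layers.dense(tf.layers.flatten(image), 10)
            cost = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=logits)
            return tf.identity(cost, name='total_cost')
        def optimizer(self):
            return tf.train.AdamOptimizer(1e-3)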
|
the-stack_0_8414 | # Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
import subprocess
from distutils.cmd import Command
from setuptools import find_packages
try:
from setuptools import setup
from setuptools.command.install import install
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
except ImportError:
from distutils.core import setup
from distutils.command.install import install
from distutils.command.build_py import build_py
NAME = "feast"
DESCRIPTION = "Python SDK for Feast"
URL = "https://github.com/feast-dev/feast"
AUTHOR = "Feast"
REQUIRES_PYTHON = ">=3.7.0"
REQUIRED = [
"Click==7.*",
"colorama>=0.3.9",
"fastavro>=1.1.0",
"google-api-core>=1.23.0",
"googleapis-common-protos==1.52.*",
"grpcio>=1.34.0",
"Jinja2>=2.0.0",
"jsonschema",
"mmh3",
"pandas>=1.0.0",
"pandavro==1.5.*",
"protobuf>=3.10",
"pyarrow>=2.0.0",
"pydantic>=1.0.0",
"PyYAML==5.3.*",
"tabulate==0.8.*",
"tenacity>=7.*",
"toml==0.10.*",
"tqdm==4.*",
]
GCP_REQUIRED = [
"google-cloud-bigquery>=2.0.*",
"google-cloud-bigquery-storage >= 2.0.0",
"google-cloud-datastore>=2.1.*",
"google-cloud-storage>=1.20.*",
"google-cloud-core==1.4.*",
]
REDIS_REQUIRED = [
"redis-py-cluster==2.1.2",
]
AWS_REQUIRED = [
"boto3==1.17.*",
]
CI_REQUIRED = [
"cryptography==3.3.2",
"flake8",
"black==19.10b0",
"isort>=5",
"grpcio-tools==1.34.0",
"grpcio-testing==1.34.0",
"mock==2.0.0",
"moto",
"mypy==0.790",
"mypy-protobuf==1.24",
"avro==1.10.0",
"gcsfs",
"urllib3>=1.25.4",
"pytest==6.0.0",
"pytest-cov",
"pytest-xdist",
"pytest-lazy-fixture==0.6.3",
"pytest-timeout==1.4.2",
"pytest-ordering==0.6.*",
"pytest-mock==1.10.4",
"Sphinx!=4.0.0",
"sphinx-rtd-theme",
"adlfs==0.5.9",
"firebase-admin==4.5.2",
"pre-commit",
"assertpy==1.1",
"google-cloud-bigquery>=2.0.*",
"google-cloud-bigquery-storage >= 2.0.0",
"google-cloud-datastore>=2.1.*",
"google-cloud-storage>=1.20.*",
"google-cloud-core==1.4.*",
"redis-py-cluster==2.1.2",
"boto3==1.17.*",
]
# README file from Feast repo root directory
repo_root = (
subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
.communicate()[0]
.rstrip()
.decode("utf-8")
)
README_FILE = os.path.join(repo_root, "README.md")
#with open(README_FILE, "r") as f:
LONG_DESCRIPTION = "Feast Feast Feast"
# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.
# Regex modified from default tag regex in:
# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9
TAG_REGEX = re.compile(
r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
)
class BuildProtoCommand(Command):
description = "Builds the proto files into python files."
def initialize_options(self):
self.protoc = ["python", "-m", "grpc_tools.protoc"] # find_executable("protoc")
self.proto_folder = os.path.join(repo_root, "protos")
self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')
self.sub_folders = ["core", "serving", "types", "storage"]
def finalize_options(self):
pass
def _generate_protos(self, path):
proto_files = glob.glob(os.path.join(self.proto_folder, path))
subprocess.check_call(self.protoc + [
'-I', self.proto_folder,
'--python_out', self.this_package,
'--grpc_python_out', self.this_package,
'--mypy_out', self.this_package] + proto_files)
def run(self):
for sub_folder in self.sub_folders:
self._generate_protos(f'feast/{sub_folder}/*.proto')
from pathlib import Path
for path in Path('feast/protos').rglob('*.py'):
for folder in self.sub_folders:
# Read in the file
with open(path, 'r') as file:
filedata = file.read()
# Replace the target string
filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')
# Write the file out again
with open(path, 'w') as file:
file.write(filedata)
class BuildCommand(build_py):
"""Custom build command."""
def run(self):
self.run_command('build_proto')
build_py.run(self)
class DevelopCommand(develop):
"""Custom develop command."""
def run(self):
self.run_command('build_proto')
develop.run(self)
setup(
name=NAME,
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=("tests",)),
install_requires=REQUIRED,
# https://stackoverflow.com/questions/28509965/setuptools-development-requirements
# Install dev requirements with: pip install -e .[dev]
extras_require={
"dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"],
"ci": CI_REQUIRED,
"gcp": GCP_REQUIRED,
"aws": AWS_REQUIRED,
"redis": REDIS_REQUIRED,
},
include_package_data=True,
license="Apache",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
entry_points={"console_scripts": ["feast=feast.cli:cli"]},
use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
setup_requires=["setuptools_scm", "grpcio", "grpcio-tools==1.34.0", "mypy-protobuf", "sphinx!=4.0.0"],
package_data={
"": [
"protos/feast/**/*.proto",
"protos/feast/third_party/grpc/health/v1/*.proto",
"protos/tensorflow_metadata/proto/v0/*.proto",
"feast/protos/feast/**/*.py",
"tensorflow_metadata/proto/v0/*.py"
],
},
cmdclass={
"build_proto": BuildProtoCommand,
"build_py": BuildCommand,
"develop": DevelopCommand,
},
)
|
the-stack_0_8415 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 14:42:14 2017
This downloads and unzips the wage data by MSA and States from the BLS website
@author: carrie
"""
from bs4 import BeautifulSoup
import requests, urllib.request, shutil, zipfile
import datetime, os, time
#import re, webbrowser
#import schedule
#import datetime
#import time
#
## Obtain current time
#start = datetime.datetime.now()
#
## Simple callable for example
#class DummyClock:
# def __call__(self):
# print datetime.datetime.now()
#
#schedule.every(1).seconds.do(DummyClock())
#
#while True:
# schedule.run_pending()
# # 5 minutes == 300 seconds
# if (datetime.datetime.now() - start).seconds >= 300:
# break
# # And here we halt execution for a second
# time.sleep(1)
class BLSWages:
'''Download the zipped folders from BLS with wage data from Metro Areas and the State'''
#BLS Data Source
BLS_url = 'https://www.bls.gov/oes/tables.htm'
BLS_main_link = 'https://www.bls.gov/'
page = requests.get(BLS_url)
titleShouldBe = "Tables Created by BLS"
#Todays Date
now = datetime.datetime.now()
formatTime = now.strftime("%Y-%m-%d %H:%M")
print("Running BLS Wage Web scraper: {0}".format(formatTime))
#First test is if the page will load
def PageStatus(self):
status = self.page.status_code
soup = ""
if status == 200:
soup = BeautifulSoup(self.page.text, 'html.parser')
self.CheckElementonWebsite(soup, self.titleShouldBe)
print("Downloading...")
self.DownloadStateData(soup)
time.sleep(2)
self.DownloadMetroData(soup)
else:
print("Page will not load")
log = open("Error_Data.txt","a")
log.write("Error on Page Load: Page status is " + " " + str(status) + "\t" + "Date: " + self.formatTime + "\n")
#Check if the page title has changed; if so, the rest of the page and the download links may have changed, so log the issue
def CheckElementonWebsite(self, soup, titletoCheckAgainst ):
title = soup.title.string
if title == titletoCheckAgainst:
print("Title of web page check passed: {0}".format(soup.title.string))
else:
print("Title on BLSWages website changed")
log = open("Error_Data.txt","a")
log.write("Title on Website has changed from '" + str(titletoCheckAgainst) + "' to '" + str(title) + "' \t" + "Date: " + self.formatTime + "\n")
def GetFileNamesfromDirectory(self):
dirpath = os.getcwd()
log_dir = os.path.join(dirpath, "log")
print(log_dir)
for file in os.listdir(log_dir):
print(file)
if file.endswith(".zip"):
print(os.path.join(log_dir, file))
return file
#Download BLS Data unzip it and delete the zip container
def DownloadMetroData(self, soup):
body = soup.find("div", {"id": "bodytext"})
links = body.find_all('a', href=True)[6]
href = links['href']
url = self.BLS_main_link+href
print(url)
dir_path = os.path.dirname(os.path.realpath(__file__))
bLS_WageMetro = os.path.join(os.path.sep, dir_path, 'log', 'BLS_WageMetro.zip')
folder = os.path.join(os.path.sep, dir_path, 'log')
with urllib.request.urlopen(url) as response, open(bLS_WageMetro, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
#Extract files from zip
with zipfile.ZipFile(bLS_WageMetro) as zf:
zf.extractall(folder)
#Remove the zip file and any unnecessary files
os.remove(bLS_WageMetro)
#webbrowser.open(url)
#if href == "/oes/special.requests/oesm16ma.zip":
# print("Data for May 2016 already downloaded" + href)
#Download BLS Data unzip it and delete the zip container
def DownloadStateData(self, soup):
body = soup.find("div", {"id": "bodytext"})
links = body.find_all('a', href=True)[4]
href = links['href']
url = self.BLS_main_link+href
print(url)
dir_path = os.path.dirname(os.path.realpath(__file__))
bLS_WageState = os.path.join(os.path.sep, dir_path, 'log', 'BLS_WageState.zip')
folder = os.path.join(os.path.sep, dir_path, 'log')
with urllib.request.urlopen(url) as response, open(bLS_WageState, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
#Extract files from zip
time.sleep(8)
z = zipfile.ZipFile(bLS_WageState)
z.extractall(folder)
z.close()
del z
os.unlink(bLS_WageState)
##MAIN
#wages = BLSWages()
#wages.PageStatus()
|
the-stack_0_8416 | """Command line tools to interact with the Insteon devices."""
from .. import devices
from ..constants import RAMP_RATES, ALDBStatus, DeviceCategory
from ..managers.scene_manager import async_add_device_to_scene
from ..utils import seconds_to_ramp_rate
from .advanced import AdvancedTools
from .tools_base import ToolsBase
class ToolsAldb(ToolsBase):
"""Command class to test interactivity."""
async def do_load_aldb(self, *args, **kwargs):
"""Load the All-Link Database of a device.
Usage:
load_aldb <ADDRESS>|all y|n Load one or all devices (can be the modem address)
To clear the current ALDB and reload from the device, enter `y` as the second argment.
Otherwise enter `n`.
"""
args = args[0].split()
try:
address = args[0]
except IndexError:
address = None
try:
refresh_yn = args[1]
refresh = refresh_yn.lower() == "y"
except IndexError:
refresh_yn = ""
addresses = await self._get_addresses(
address=address, allow_cancel=True, allow_all=True, match_device=True
)
if not addresses:
return
if devices[addresses[0]] != devices.modem or len(addresses) > 1:
if not refresh_yn:
refresh_yn = await self._get_char(
"Clear existing records and reload (y/n)",
default="n",
values=["y", "n"],
)
refresh = refresh_yn.lower() == "y"
battery_devices = []
for address in addresses:
if devices[address].is_battery:
battery_devices.append(address)
# Only load the modem ALDB if explicitly asked
if devices[address] == devices.modem and len(addresses) == 1:
await devices.modem.aldb.async_load()
elif devices[address].cat == 0x03:
pass
else:
# tasks.append(devices[address].aldb.async_load(refresh=refresh))
await devices[address].aldb.async_load(refresh=refresh)
if battery_devices:
self._log_stdout("The following devices are battery operated.")
self._log_stdout("They will load in the background when they wake up.")
for address in battery_devices:
self._log_stdout(f" - {address}")
# if the device did not load the first time, try one more time with refresh
for address in addresses:
if (
devices[address] != devices.modem
and devices[address].aldb.status != ALDBStatus.LOADED
and not devices[address].is_battery
):
await devices[address].aldb.async_load(refresh=refresh)
async def do_print_aldb(self, *args, **kwargs):
"""Print the records in an All-Link Database.
Usage:
print_aldb <ADDRESS>|all
"""
await self._print_aldb(*args)
async def do_add_default_links(self, *args, **kwargs):
"""Add default links between a device and the modem.
Usage:
add_default_links <ADDRESS>
"""
args = args[0].split()
try:
address = args[0]
except IndexError:
address = None
addresses = await self._get_addresses(
address=address, allow_all=False, allow_cancel=True, match_device=True
)
if not addresses:
return
device = devices[addresses[0]]
self._log_command(f"add_default_links {addresses[0]}")
await device.async_add_default_links()
async def do_add_device_to_scene(self, *args, **kwargs):
"""Add a device to a scene.
Usage:
add_device_to_scene <ADDRESS> <SCENE NUMBER> [<ON LEVEL>] [<RAMP RATE>] [<BUTTON>] | [Data1] [Data2] [Data3]
<ADDRESS>: The device address such as 1a.2b.3c
<SCENE NUMBER>: Value from 25 to 255.
For Device type 1:
<ON LEVEL>: (Optional) Value from 0 (off) - 255 (full on).
For dimmable devices any number from 0 to 255 is allowable.
Default is 255.
<RAMP RATE>: 0.1 seconds to 480 seconds (8 minutes)
Default is 0.5 seconds
<BUTTON>: (Optional) The button or group number of the device to change as part of the scene.
Valid values are device dependant.
Default is 1.
for Device type 2:
<Data1>: (Optional) Value from 0 (off) - 255 (full on).
For on/off devices only 0 and 255 are allowed.
Default is 255.
<Data2>: Data field 2. Default is 0. Typically, this is not used by device type 2.
<BUTTON>: (Optional) The button or group number of the device to change as part of the scene.
Valid values are device dependant.
Default is 1.
For all other device types:
<Data1>: Data field 1. Any value from 0 - 255 are allowed. Default is 255.
<Data2>: Data field 2 Any value from 0 - 255 are allowed. Default is 0.
<Data3>: Data field 3 Any value from 0 - 255 are allowed. Default is 1.
KeyPadLinc devices will not respond correctly to scenes in this way other than the main power.
"""
args = args[0].split()
try:
address = args[0]
except IndexError:
address = None
try:
scene = int(args[1])
except (IndexError, ValueError):
scene = None
try:
data1 = int(args[2])
except (IndexError, ValueError):
data1 = None
try:
data2 = int(args[3])
except (IndexError, ValueError):
data2 = None
try:
data3 = int(args[4])
except (IndexError, ValueError):
data3 = None
addresses = await self._get_addresses(
address=address, allow_all=False, allow_cancel=True, match_device=True
)
if not addresses:
return
device = devices[addresses[0]]
if not scene:
scene = await self._get_int(
"Scene number or blank to cancel",
values=range(25, 256),
)
if not scene:
return
if data1 is None:
if device.cat == DeviceCategory.DIMMABLE_LIGHTING_CONTROL:
data1 = await self._get_int(
"On level", default=255, values=range(0, 256)
)
elif device.cat == DeviceCategory.SWITCHED_LIGHTING_CONTROL:
data1 = await self._get_int("On level", default=255, values=[0, 255])
else:
data1 = await self._get_int("Data1", default=255, values=range(0, 255))
if device.cat == DeviceCategory.DIMMABLE_LIGHTING_CONTROL:
if data2 is None:
try:
data2_seconds = float(args[3])
except (IndexError, ValueError):
data2_seconds = None
if data2_seconds is None:
data2_seconds = await self._get_float(
"Ramp rate",
default=0.5,
maximum=480,
minimum=0.1,
)
else:
data2_seconds = data2
data2 = seconds_to_ramp_rate(data2_seconds)
if RAMP_RATES[data2] != data2_seconds:
self._log_stdout(
f"Ramp rate rounded to {RAMP_RATES[data2]} to conform to standard values."
)
elif data2 is None:
data2 = await self._get_int("Data2", default=0, values=range(0, 255))
if data3 is None:
if device.cat in [
DeviceCategory.DIMMABLE_LIGHTING_CONTROL,
DeviceCategory.SWITCHED_LIGHTING_CONTROL,
]:
data3 = await self._get_int("Button", default=1, values=range(0, 255))
else:
data3 = await self._get_int("Data3", default=0, values=range(0, 255))
await async_add_device_to_scene(device, scene, data1, data2, data3)
def do_print_aldb_load_status(self, *args, **kwargs):
"""Print the All-Link databbase load status for all devices."""
self._log_stdout("")
self._log_stdout("Device Status")
self._log_stdout("-------- ---------------")
for address in devices:
self._log_stdout(f"{address} {str(devices[address].aldb.status)}")
async def do_advanced(self, *args, **kwargs):
"""Enter advanced ALDB menu."""
self._log_command("advanced")
await self._call_next_menu(AdvancedTools, "advanced")
|
the-stack_0_8418 | from gym.spaces import Discrete, Box
from gym_electric_motor.physical_systems.electric_motors import DcShuntMotor, DcExternallyExcitedMotor, \
DcPermanentlyExcitedMotor, DcSeriesMotor
from gym_electric_motor.physical_systems import SynchronousMotorSystem
import math
import numpy as np
class Controller:
@classmethod
def make(cls, controller_type, environment, **controller_kwargs):
assert controller_type in _controllers.keys(), f'Controller {controller_type} unknown'
controller = _controllers[controller_type](environment, **controller_kwargs)
return controller
def control(self, state, reference):
raise NotImplementedError
def reset(self):
pass
class OnOffController(Controller):
def __init__(self, environment, state_idx=None, reference_idx=0):
action_space = environment.action_space
assert type(action_space) is Discrete, 'Not suitable action space for On off controller'
self._high_action = 1
if action_space.n in [3, 4]:
self._low_action = 2
else:
self._low_action = 0
self._referenced_state = state_idx or np.argmax(environment.reference_generator.referenced_states)
self._ref_idx = reference_idx
def control(self, state, reference):
if state[self._referenced_state] < reference[self._ref_idx]:
return self._high_action
else:
return self._low_action
class ThreePointController(Controller):
def __init__(self, environment, hysteresis=0.01, state_idx=None, reference_idx=0):
action_space = environment.action_space
assert type(action_space) is Discrete, 'Not suitable action space for three point controller'
self._hysteresis = hysteresis
self._high_action = 1
self._idle_action = 0
self._ref_idx = reference_idx
if action_space.n in [3, 4]:
self._low_action = 2
else:
self._low_action = 0
self._referenced_state = state_idx or np.argmax(environment.reference_generator.referenced_states)
def control(self, state, reference):
if state[self._referenced_state] < reference[self._ref_idx] - self._hysteresis:
return self._high_action
elif state[self._referenced_state] > reference[self._ref_idx] + self._hysteresis:
return self._low_action
else:
return self._idle_action
class PController(Controller):
def __init__(self, environment, k_p=10, controller_no=0, state_idx=None, reference_idx=0):
action_space = environment.action_space
assert type(action_space) is Box, 'No suitable action space for P Controller'
self._k_p = k_p
self._controller_no = controller_no
self._action_min = action_space.low[controller_no]
self._action_max = action_space.high[controller_no]
self._ref_idx = reference_idx
self._referenced_state = state_idx or np.argmax(environment.reference_generator.referenced_states)
def control(self, state, reference):
return np.array([
max(
self._action_min,
min(
self._action_max,
self._k_p * (reference[self._ref_idx] - state[self._referenced_state])
)
)
])
class PIController(PController):
def __init__(self, environment, k_p=10, k_i=0.01, controller_no=0, reference_idx=0):
super().__init__(environment, k_p, controller_no, reference_idx=reference_idx)
self._k_i = k_i
self._tau = environment.physical_system.tau
self._integrated_value = 0
def control(self, state, reference):
diff = reference[self._ref_idx] - state[self._referenced_state]
self._integrated_value += diff * self._tau
return np.array([
max(
self._action_min,
min(
self._action_max,
self._k_p * diff
+ self._k_i / self._tau * self._integrated_value
)
)
])
def reset(self, **__):
self._integrated_value = 0
class PmsmOnOffController(Controller):
def __init__(self, environment, state_idx=None, ref_idx=0):
t32 = environment.physical_system.electrical_motor.t_32
q = environment.physical_system.electrical_motor.q
t23 = environment.physical_system.electrical_motor.t_23
q_inv = environment.physical_system.electrical_motor.q_inv
self._forward_transformation = lambda quantities, eps: q_inv(t23(quantities), eps)[::-1]
self._backward_transformation = (
lambda quantities, eps: t32(q(quantities[::-1], eps))
)
self._l_q = environment.physical_system.electrical_motor.motor_parameter['l_q']
self._epsilon_idx = environment.physical_system.EPSILON_IDX
self._currents_idx = environment.physical_system.CURRENTS_IDX
self._ref_idx = ref_idx
self._omega_idx = environment.physical_system.state_positions['omega']
self._u_sup = environment.physical_system.supply.u_nominal
self._referenced_state = state_idx or np.argmax(environment.reference_generator.referenced_states)
self._limits = environment.physical_system.electrical_motor.limits
def control(self, state, reference):
if state[self._referenced_state] < reference[self._ref_idx]:
u_q = 1
else:
u_q = -1
epsilon = np.pi * state[self._epsilon_idx]
u_d = 0
u_a, u_b, u_c = self._backward_transformation((u_q, u_d), epsilon)
return 4 * (u_a > 0) + 2 * (u_b > 0) + (u_c > 0)
class SynRmOnOffController(PmsmOnOffController):
def control(self, state, reference):
if state[self._referenced_state] < reference[self._ref_idx]:
u_q = 1
u_d = 1
else:
u_q = -1
u_d = -1
epsilon = state[self._epsilon_idx]
u_a, u_b, u_c = self._backward_transformation((u_q, u_d), epsilon)
return 4 * (u_a > 0) + 2 * (u_b > 0) + (u_c > 0)
class CascadedPIController(Controller):
def __init__(self, environment, ref_idx=0):
self._omega_idx = environment.physical_system.OMEGA_IDX
self._currents_idx = environment.physical_system.CURRENTS_IDX
self._voltages_idx = environment.physical_system.VOLTAGES_IDX
self._u_a_idx = self._voltages_idx[0]
self._i_a_idx = self._currents_idx[0]
if len(self._currents_idx) > 1:
self._i_e_idx = environment.physical_system.state_positions['i_e']
else:
self._i_e_idx = environment.physical_system.state_positions['i']
if len(self._voltages_idx) > 1:
self._u_e_idx = environment.physical_system.state_positions['u_e']
else:
self._u_e_idx = None
self._limits = environment.physical_system.limits
self._ref_idx = ref_idx
self._tau = environment.physical_system.tau
mp = environment.physical_system.electrical_motor.motor_parameter
t_motor = mp['l_a'] / mp['r_a']
t_t = 3 / 2 * self._tau
r_motor = mp['r_a']
self._i_a_max = 0
self._i_a_min = 0
self._u_a_max = 0
self._u_a_min = 0
self._integrated_values = [0, 0]
self._converter_voltages = environment.physical_system.converter.voltages
self._i_a_max = self._limits[self._i_a_idx] * environment.physical_system.state_space.high[self._i_a_idx]
self._i_a_min = self._limits[self._i_a_idx] * environment.physical_system.state_space.low[self._i_a_idx]
if 'psi_e' in mp.keys():
self._psi_e = mp['psi_e']
self._i_e_max_prime = None
elif 'l_e_prime' in mp.keys():
self._psi_e = None
self._i_e_max_prime = self._limits[self._currents_idx[-1]] * mp['l_e_prime']
else:
raise Exception('Motor Parameter Error. No psi_e and no l_e_prime entry found in motor parameters')
self._u_a_max = self._limits[self._u_a_idx] * environment.physical_system.state_space.high[self._u_a_idx]
self._u_a_min = self._limits[self._u_a_idx] * environment.physical_system.state_space.low[self._u_a_idx]
# compute motor type specific parameter
# use inner_ and outer_gain_adjustment to adjust the integral part gains for better control behaviour
# Gains chosen as given in "Elektrische Antriebe - Regelung von Antriebssystemen", D. Schröder, 2009
if type(environment.physical_system.electrical_motor) == DcPermanentlyExcitedMotor:
inner_gain_adjustment = 1e-3
outer_gain_adjustment = 1e-3
elif type(environment.physical_system.electrical_motor) == DcSeriesMotor:
t_motor = (mp['l_a'] + mp['l_e']) / (mp['r_a'] + mp['r_e'])
r_motor = (mp['r_a'] + mp['r_e'])
inner_gain_adjustment = 1
outer_gain_adjustment = 1
elif type(environment.physical_system.electrical_motor) == DcExternallyExcitedMotor:
inner_gain_adjustment = 1E-4
outer_gain_adjustment = 1E-3
elif type(environment.physical_system.electrical_motor) == DcShuntMotor:
inner_gain_adjustment = 1E-2
outer_gain_adjustment = 1
else:
raise Exception('Unknown Motor')
# set up gains for the controller
# Integral gains are multiplied by the sampling time to simplify the computation during control
t_sigma = min(t_motor, t_t)
t_1 = max(t_motor, t_t)
v_s = 1 / r_motor
# Integral Inner loop
self._k_i_i = 1 / (2 * t_sigma * v_s) * self._tau * inner_gain_adjustment
# Proportional Inner loop
self._k_p_i = t_1 / (2 * t_sigma * v_s)
# Integral Outer loop
j = environment.physical_system.mechanical_load.j_total
self._k_i_o = (
j / (32 * t_sigma ** 2)
* self._tau * outer_gain_adjustment
)
# Proportional Outer loop
self._k_p_o = j / (4 * t_sigma)
def control(self, state, reference):
# denormalize quantities
omega = state[self._omega_idx] * self._limits[self._omega_idx]
omega_ref = reference[self._ref_idx] * self._limits[self._omega_idx]
i_a = state[self._i_a_idx] * self._limits[self._i_a_idx]
psi_e = self._psi_e or state[self._i_e_idx] * self._i_e_max_prime
# outer control loop
d_omega = omega_ref - omega
if psi_e != 0:
temp = self._integrated_values[0] + d_omega * self._k_i_o / psi_e # integral part
i_a_des = temp + d_omega * self._k_p_o / psi_e
else:
i_a_des = math.copysign(1, d_omega) * self._i_a_max
temp = self._integrated_values[0]
# hold current constraints, anti wind-up
if i_a_des > self._i_a_max or i_a_des < self._i_a_min:
i_a_des = min(max(i_a_des, self._i_a_min), self._i_a_max)
else:
self._integrated_values[0] = temp
d_i_a = i_a_des - i_a
# inner control loop
temp = self._integrated_values[1] + d_i_a * self._k_i_i # integral part
d_u_a = temp + d_i_a * self._k_p_i
u_a_0 = omega * psi_e
u_a = d_u_a + u_a_0
# hold voltage limits, anti wind-up
if u_a > self._u_a_max or u_a < self._u_a_min:
u_a = min(max(u_a, self._u_a_min), self._u_a_max)
else:
self._integrated_values[1] = temp
# normalize the desired output voltage to a duty cycle referring to the supply voltage
# Assumption: u_sup = u_N is made
des_duty_cycle = u_a / self._limits[self._u_a_idx]
duty_cycle = min(
max(des_duty_cycle, self._u_a_min / self._limits[self._u_a_idx]),
self._u_a_max / self._limits[self._u_a_idx])
return np.array([duty_cycle])
class FOCController(Controller):
def __init__(self, environment, ref_idx=0, weight=1):
assert type(environment.physical_system) is SynchronousMotorSystem
self._ref_idx = ref_idx
self._weight = weight
self._omega_idx = environment.physical_system.OMEGA_IDX
self._currents_idx = environment.physical_system.CURRENTS_IDX
self._voltages_idx = environment.physical_system.VOLTAGES_IDX
self._epsilon_idx = environment.physical_system.EPSILON_IDX
self._limits = environment.physical_system.limits
self._tau = environment.physical_system.tau
t32 = environment.physical_system.electrical_motor.t_32
q = environment.physical_system.electrical_motor.q
t23 = environment.physical_system.electrical_motor.t_23
q_inv = environment.physical_system.electrical_motor.q_inv
self._forward_transformation = lambda quantities, eps: q_inv(t23(quantities), eps)[::-1]
self._backward_transformation = (
lambda quantities, eps: t32(q(quantities[::-1], eps))
)
self._motor_parameter = environment.physical_system.electrical_motor.motor_parameter
mp = self._motor_parameter
# current controller i_d
t_motor_d = mp['l_d'] / mp['r_s']
tau = environment.physical_system.tau
t_t = 3 / 2 * tau
t_1_d = max(t_motor_d, t_t)
t_sigma_d = min(t_motor_d, t_t)
v_s_d = 1 / mp['r_s']
# current controller i_q
t_motor_q = mp['l_q'] / mp['r_s']
t_1_q = max(t_motor_q, t_t)
t_sigma_q = min(t_motor_q, t_t)
v_s_q = 1 / mp['r_s']
# outer speed controller
t_2 = 2 * t_sigma_q
t_1_s = environment.physical_system.mechanical_load.j_total
v_s_s = 3 / 2 * mp['p'] * mp['psi_p']
self._k_i_t = 2 * t_1_s / v_s_s * tau # integral gain speed controller.
self._k_p_t = t_1_s / (2 * t_2 * v_s_s) # prop. gain speed controller
self._k_i_d = 1 / (2 * t_sigma_d * v_s_d) * tau # integral gain i_sd controller.
self._k_p_d = t_1_d / (2 * t_sigma_d * v_s_d) # prop. gain i_sd controller
self._k_i_q = 1 / (2 * t_sigma_q * v_s_q) * tau # integral gain i_sq controller.
self._k_p_q = t_1_q / (2 * t_sigma_q * v_s_q) # prop. gain i_sq controller
# specify max values for normalisation and anti wind up
# an anti wind up scheme is necessary for good control behaviour to limit the integral parts in case of
# limit violations of the desired input voltage
# maximum speed without flux weakening
self._omega_1 = (
self._limits[self._voltages_idx][0] / mp['l_q'] / np.sqrt(self._limits[self._currents_idx][0]) ** 2
+ mp['psi_p'] ** 2 / mp['l_q'] ** 2
)
self._integrated_values = [0, 0, 0]
def reset(self):
self._integrated_values = [0, 0, 0]
def control(self, state, reference):
"""
Field oriented control from the lecture "controlled three phase drives, chapter 5"
"""
# extract quantities from state
mp = self._motor_parameter
omega = state[self._omega_idx] * self._limits[self._omega_idx]
omega_ref = reference[self._ref_idx] * self._limits[self._omega_idx]
u = state[self._voltages_idx] * self._limits[self._voltages_idx]
epsilon = state[self._epsilon_idx] * self._limits[self._epsilon_idx]
i = state[self._currents_idx] * self._limits[self._currents_idx]
# transformation from a/b/c to alpha/beta and d/q
i_qd = self._forward_transformation(i, epsilon)
# compute u_d_0 and u_q_0
u_d_0 = omega * mp['l_q'] * i_qd[0]
u_q_0 = omega * (mp['psi_p'] + mp['l_d'] * i_qd[1])
d_omega = omega_ref - omega
# compute T* (Torque reference) and i*_sq (q-axis current reference)
temp = self._integrated_values[0] + d_omega * self._k_i_t # integral part
t_des = temp + d_omega * self._k_p_t # proportional part
i_sq_des = 2 * t_des / (3 * mp['p'] * mp['psi_p'])
# anti wind-up
if i_sq_des > self._limits[self._currents_idx[0]] * self._weight\
or i_sq_des < -self._limits[self._currents_idx[0]] * self._weight:
i_sq_des = min(
max(i_sq_des, -self._limits[self._currents_idx[0]] * self._weight),
self._limits[self._currents_idx[0]] * self._weight
)
else:
self._integrated_values[0] = temp
if abs(omega_ref) < self._omega_1:
i_sd_des = 0
else:
i_sd_des = (
((self._limits[self._voltages_idx[0]] / omega_ref) ** 2
- (mp['l_q'] * self._limits[self._currents_idx[0]]) ** 2 - mp['psi_p'] ** 2)
/ (2 * mp['psi_p'] * mp['l_d']))
# transform back to abc-domain
currents = self._backward_transformation((i_sq_des, i_sd_des), epsilon)
# test if current limits are violated
if np.max(np.abs(currents)) > self._limits[self._currents_idx[0]]:
clipping = self._limits[self._currents_idx]
currents = np.clip(currents, -clipping, clipping)
array = self._forward_transformation(currents, epsilon)
i_sd_des = array[1]
i_sq_des = array[0]
# compute du*_sq, du*_sd
d_i_sd = i_sd_des - i_qd[1]
d_i_sq = i_sq_des - i_qd[0]
temp_u_sd = self._integrated_values[1] + d_i_sd * self._k_i_d # integral part
temp_u_sq = self._integrated_values[2] + d_i_sq * self._k_i_q # integral part
d_u_sd_des = temp_u_sd + d_i_sd * self._k_p_d
d_u_sq_des = temp_u_sq + d_i_sq * self._k_p_q
# anti-wind-up u_sd
if d_u_sd_des > self._limits[self._voltages_idx[0]] * self._weight - u_d_0 or \
d_u_sd_des < -self._limits[self._voltages_idx[0]] * self._weight - u_d_0:
d_u_sd_des = np.clip(d_u_sd_des, -self._limits[self._voltages_idx[0]] * self._weight - u_d_0,
self._limits[self._voltages_idx[0]] * self._weight - u_d_0)
else:
self._integrated_values[1] = temp_u_sd
# anti-wind-up u_sq
if d_u_sq_des > self._limits[self._voltages_idx[0]] * self._weight - u_q_0 or \
d_u_sq_des < -self._limits[self._voltages_idx[0]] * self._weight - u_q_0:
d_u_sq_des = np.clip(d_u_sq_des, -self._limits[self._voltages_idx[0]] * self._weight - u_q_0,
self._limits[self._voltages_idx[0]] * self._weight - u_q_0)
else:
self._integrated_values[2] = temp_u_sq
# compute u*_sq, u*_sd, epsilon + d_epsilon due to delay of the controller
u_sd_des = u_d_0 + d_u_sd_des
u_sq_des = d_u_sq_des + u_q_0
epsilon_shift = epsilon + 3 / 2 * self._tau * omega
# from d/q to alpha/beta and a/b/c
u_qd_des = np.array([u_sq_des, u_sd_des])
voltages = self._backward_transformation(u_qd_des, epsilon_shift)
# normalise inputs
result = np.clip(voltages / self._limits[self._voltages_idx[0]], -1, 1)
return result
class PmsmPController(Controller):
def __init__(self, environment, state_idx=None, ref_idx=0, k_p=1):
self._k_p = k_p
t32 = environment.physical_system.electrical_motor.t_32
q = environment.physical_system.electrical_motor.q
t23 = environment.physical_system.electrical_motor.t_23
q_inv = environment.physical_system.electrical_motor.q_inv
self._forward_transformation = lambda quantities, eps: q_inv(t23(quantities), eps)[::-1]
self._backward_transformation = (
lambda quantities, eps: t32(q(quantities[::-1], eps))
)
self._epsilon_idx = environment.physical_system.EPSILON_IDX
self._currents_idx = environment.physical_system.CURRENTS_IDX
self._ref_idx = ref_idx
self._referenced_state = state_idx or np.argmax(environment.reference_generator.referenced_states)
self._phase = 0
def control(self, state, reference):
u_q = min(1, max(-1, self._k_p * (reference[self._ref_idx] - state[self._referenced_state])))
epsilon = np.pi * state[self._epsilon_idx]
u_d = 0
u_a, u_b, u_c = self._backward_transformation((u_q, u_d), epsilon)
return [u_a, u_b, u_c]
def reset(self):
self._phase = 0
class ThreePhaseSteadyState(Controller):
def __init__(self, environment, omega_el=15):
self._omega_el = omega_el
self._tau = environment.physical_system.tau
self._k = 0
t = np.linspace(0, 2 * np.pi / abs(omega_el), int(1 / abs(omega_el * self._tau)))
self._u_a = np.sin(omega_el * t)
self._u_b = np.sin(omega_el * t - 2/3 * np.pi)
self._u_c = np.sin(omega_el * t + 2/3 * np.pi)
def reset(self):
self._k = -1
def control(self, state, reference):
self._k += 1
length = len(self._u_a)
return self._u_a[self._k % length], self._u_b[self._k % length], self._u_c[self._k % length],
_controllers = {
'on_off': OnOffController,
'three_point': ThreePointController,
'p_controller': PController,
'pi_controller': PIController,
'pmsm_on_off': PmsmOnOffController,
'synrm_on_off': SynRmOnOffController,
'cascaded_pi': CascadedPIController,
'foc_controller': FOCController,
'pmsm_p_controller': PmsmPController,
'three_phase_steadystate': ThreePhaseSteadyState
}
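# Hedged usage sketch (not part of the module): given an already-constructed
# gym-electric-motor environment `env`, controllers are created through the
# registry above; names are the keys of `_controllers`, and the reset()/step()
# signatures assumed here follow the (state, reference) convention used by the
# control() methods in this file.
#
# controller = Controller.make('three_point', env, hysteresis=0.02)
# state, reference = env.reset()
# action = controller.control(state, reference)
# (state, reference), reward, done, _ = env.step(action)
# controller.reset()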
|
the-stack_0_8419 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_overlay_global
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Configures anycast gateway MAC of the switch.
description:
- Configures anycast gateway MAC of the switch.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Passing the value 'default' restores the parameter's default value
- Supported MAC address formats are "E.E.E", "EE-EE-EE-EE-EE-EE",
"EE:EE:EE:EE:EE:EE" and "EEEE.EEEE.EEEE"
options:
anycast_gateway_mac:
description:
- Anycast gateway mac of the switch.
required: true
'''
EXAMPLES = '''
- nxos_overlay_global:
anycast_gateway_mac: "b.b.b"
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["fabric forwarding anycast-gateway-mac 000B.000B.000B"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
PARAM_TO_COMMAND_KEYMAP = {
'anycast_gateway_mac': 'fabric forwarding anycast-gateway-mac',
}
def get_existing(module, args):
existing = {}
config = str(get_config(module))
for arg in args:
command = PARAM_TO_COMMAND_KEYMAP[arg]
has_command = re.findall(r'(?:{0}\s)(?P<value>.*)$'.format(command), config, re.M)
value = ''
if has_command:
value = has_command[0]
existing[arg] = value
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if value:
new_dict[new_key] = value
return new_dict
def get_commands(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, proposed in proposed_commands.items():
existing_value = existing_commands.get(key)
if proposed == 'default' and existing_value:
commands.append('no {0} {1}'.format(key, existing_value))
elif 'anycast-gateway-mac' in key and proposed != 'default':
proposed = normalize_mac(proposed, module)
existing_value = normalize_mac(existing_value, module)
if proposed != existing_value:
command = '{0} {1}'.format(key, proposed)
commands.append(command)
if commands:
candidate.add(commands, parents=[])
def normalize_mac(proposed_mac, module):
if proposed_mac is None:
return ''
try:
if '-' in proposed_mac:
splitted_mac = proposed_mac.split('-')
if len(splitted_mac) != 6:
raise ValueError
for octect in splitted_mac:
if len(octect) != 2:
raise ValueError
elif '.' in proposed_mac:
splitted_mac = []
splitted_dot_mac = proposed_mac.split('.')
if len(splitted_dot_mac) != 3:
raise ValueError
for octect in splitted_dot_mac:
if len(octect) > 4:
raise ValueError
else:
octect_len = len(octect)
padding = 4 - octect_len
splitted_mac.append(octect.zfill(padding + 1))
elif ':' in proposed_mac:
splitted_mac = proposed_mac.split(':')
if len(splitted_mac) != 6:
raise ValueError
for octect in splitted_mac:
if len(octect) != 2:
raise ValueError
else:
raise ValueError
except ValueError:
module.fail_json(msg='Invalid MAC address format', proposed_mac=proposed_mac)
joined_mac = ''.join(splitted_mac)
mac = [joined_mac[i:i + 4] for i in range(0, len(joined_mac), 4)]
return '.'.join(mac).upper()
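# Illustrative behaviour of normalize_mac() for the supported input formats
# (sketch only; the first example matches the RETURN sample above):
# normalize_mac('b.b.b', module) -> '000B.000B.000B'
# normalize_mac('0b-0b-0b-0b-0b-0b', module) -> '0B0B.0B0B.0B0B'
# normalize_mac('0b:0b:0b:0b:0b:0b', module) -> '0B0B.0B0B.0B0B'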
def main():
argument_spec = dict(
anycast_gateway_mac=dict(required=True, type='str'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'commands': [], 'warnings': warnings}
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args)
proposed = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
candidate = CustomNetworkConfig(indent=3)
get_commands(module, existing, proposed, candidate)
if candidate:
candidate = candidate.items_text()
result['commands'] = candidate
if not module.check_mode:
load_config(module, candidate)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
the-stack_0_8420 | import networkx as nx
from networkx.readwrite import json_graph
import pylab as plt
import json
import sys
import os
from c_aws import *
import urllib3
import concurrent.futures
import time
def carve_results():
# call subnet lambdas to collect their results from their beacons
# get all registered beacons from SSM
print('getting latest test results')
# get a list of subnets, accounts, regions, and beacons
subnets = get_subnet_beacons()
# use threading for speed, get all beacon reports
results = {}
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
p = os.environ['Prefix']
for beacon, data in subnets.items():
print(f"getting results from {beacon}")
payload = {
'action': 'results',
'beacon': beacon
}
futures.append(executor.submit(
aws_invoke_lambda,
arn=f"arn:aws:lambda:{data['region']}:{data['account']}:function:{p}carve-{data['subnet']}",
payload=payload,
region=data['region'],
credentials=None))
for future in concurrent.futures.as_completed(futures):
result = future.result()
results[result['subnet']] = {
'beacon': result['beacon'],
'status': result['status'],
'fping': result['fping'],
'health': result['health'],
'ts': result['ts']
}
# push subnet beacons data to S3
log = json.dumps(results, ensure_ascii=True, indent=2, sort_keys=True)
aws_put_direct(log, f"logs/verification-{int(time.time())}")
return
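# Illustrative shape of the per-subnet entries written to S3 above (values are
# hypothetical; the keys come from each subnet lambda's 'results' response):
# {
#     "subnet-0123456789abcdef0": {
#         "beacon": "10.0.1.23",
#         "status": "up",
#         "fping": {...},
#         "health": "up",
#         "ts": 1620000000
#     }
# }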
def process_test_results(results):
# determine verification beacons here
G = load_graph(aws_newest_s3('deployed_graph/'), local=False)
subnet_beacons = get_subnet_beacons()
verify_beacons = []
for edge in G.edges:
if vpc not in edge:
G.remove_edge(edge[0], edge[1])
# def get_asgs(G=None):
# if G is None:
# G = load_graph(aws_newest_s3('deployed_graph/'), local=False)
# # determine all deployed ASGs
# asgs = {}
# for subnet in list(G.nodes):
# asg = f"{os.environ['Prefix']}carve-beacon-asg-{G.nodes().data()[subnet]['VpcId']}"
# if asg not in asgs:
# asgs[asg] = {
# 'account': G.nodes().data()[subnet]['Account'],
# 'region': G.nodes().data()[subnet]['Region'],
# }
# for asg, values in asgs.items():
# return asgs
def scale_beacons(scale):
'''
set the carve beacon auto scaling groups to the requested scale ('none', 'subnet' or 'vpc')
create an SSM task token entry per subnet and start the token step function to track scaling
then update each ASG's min/max/desired capacity (or clear the tokens if no change is needed)
'''
G = load_graph(aws_newest_s3('deployed_graph/'), local=False)
vpcs = {}
payload = []
for subnet in list(G.nodes):
# determine VPCs and regions
a = G.nodes().data()[subnet]['Account']
r = G.nodes().data()[subnet]['Region']
vpcs[G.nodes().data()[subnet]['VpcId']] = (a, r)
# add an ssm path to store tokens for each subnet
payload.append({
'parameter': f"/{os.environ['Prefix']}carve-resources/tokens/{subnet}",
'task': 'scale',
'scale': scale
})
# start a step function to generate tokens to track scaling each subnet
name = f"scale-{scale}-{int(time.time())}"
print('starting token step function')
aws_start_stepfunction(os.environ['TokenStateMachine'], payload, name)
# generate a list of autoscaling groups to scale
asgs = []
for vpc, ar in vpcs.items():
vpc_subnets = [x for x,y in G.nodes(data=True) if y['VpcId'] == vpc]
asgs.append({
'asg': f"{os.environ['Prefix']}carve-beacon-asg-{vpc}",
'account': ar[0],
'region': ar[1],
'subnets': vpc_subnets
})
# wait for tokens to appear before scaling
i = 0
while True:
tokens = aws_ssm_get_parameters(f"/{os.environ['Prefix']}carve-resources/tokens/")
if len(payload) == len(tokens):
print('tokens are ready')
break
else:
if i > 30:
print('timed out waiting for tokens')
break
else:
i = i + 1
print('waiting 1s for tokens...')
time.sleep(1)
print(f'scaling asgs: {asgs}')
# using threading, set all ASGs to correct scale for all beacons
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for asg in asgs:
if scale == 'none':
desired = 0
elif scale == 'subnet':
desired = len(asg['subnets'])
elif scale == 'vpc':
desired = 1
futures.append(executor.submit(
update_asg_size,
account=asg['account'],
asg=asg['asg'],
minsize=0,
maxsize=len(asg['subnets']),
desired=desired,
region=asg['region']
))
for future in concurrent.futures.as_completed(futures):
result = future.result()
def update_asg_size(account, asg, minsize, maxsize, desired, region):
credentials=aws_assume_role(carve_role_arn(account), f"lookup-{asg}")
asg_info = aws_describe_asg(asg, region, credentials)
print(f'scaling asg: {asg}')
# only update ASG if min/max/desired is different
update = False
if int(asg_info['MinSize']) != int(minsize):
print('scale due to MinSize')
update = True
elif int(asg_info['MaxSize']) != int(maxsize):
print('scale due to MaxSize')
update = True
elif int(asg_info['DesiredCapacity']) != int(desired):
print('scale due to DesiredCapacity')
update = True
else:
print('no scaling update to ASG')
if update:
aws_update_asg_size(asg, minsize, maxsize, desired, region, credentials)
else:
# if no udpates, return success for the task tokens
subnets = asg_info['VPCZoneIdentifier'].split(',')
print(f'clearing tokens for subnets: {subnets}')
for subnet in subnets:
ssm_param = f"/{os.environ['Prefix']}carve-resources/tokens/{subnet}"
token = aws_ssm_get_parameter(ssm_param)
aws_ssm_delete_parameter(ssm_param)
if token is not None:
aws_send_task_success(token, {"action": "scale", "result": "none"})
else:
print(f'taskToken was None for {subnet}')
def get_subnet_beacons():
# return dict containing all subnets with their beacon ip, account, and region
# load latest graph
G = load_graph(aws_newest_s3('deployed_graph/'), local=False)
subnet_beacons = json.loads(aws_read_s3_direct('managed_deployment/subnet-beacons.json', current_region))
subnets = {}
# for vpc in list(G.nodes):
for subnet, data in G.nodes().data():
# only get results if there is an active beacon in the subnet
if subnet in subnet_beacons:
subnets[subnet_beacons[subnet]] = {
'subnet': subnet,
'account': data['Account'],
'region': data['Region']
}
else:
# this conditon needs to be handled if there is no beacon
pass
return subnets
def update_carve_beacons():
'''
discover all beacon IP addresses
add the beacons to the carve-config cloudformation snippet
push the snippet to regional s3 buckets to be used as a cloudformation include
'''
print('updating carve beacons')
G = load_graph(aws_newest_s3('deployed_graph/'), local=False)
# determine all deployed ASGs
asgs = {}
for subnet in list(G.nodes):
asg = f"{os.environ['Prefix']}carve-beacon-asg-{G.nodes().data()[subnet]['VpcId']}"
if asg not in asgs:
asgs[asg] = {
'account': G.nodes().data()[subnet]['Account'],
'region': G.nodes().data()[subnet]['Region']
}
# threaded look up the IP address of all beacons in all ASGs
subnet_beacons = {}
all_beacons = []
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for asg, value in asgs.items():
futures.append(executor.submit(
get_beacons_thread, asg=asg, account=value['account'], region=value['region']))
for future in concurrent.futures.as_completed(futures):
result = future.result()
subnet_beacons.update(result)
for subnet, beacon in result.items():
all_beacons.append(beacon)
# push subnet beacons data to S3
data = json.dumps(subnet_beacons, ensure_ascii=True, indent=2, sort_keys=True)
aws_put_direct(data, 'managed_deployment/subnet-beacons.json')
# # create an updated config file with all the beacons
# config_path = "managed_deployment/carve-config.json"
# with open(config_path) as f:
# config = json.load(f)
# config['/root/carve.cfg']['content'] = '\n'.join(beacons)
# # push carve config file to S3
# data = json.dumps(config, ensure_ascii=True, indent=2, sort_keys=True)
# aws_put_direct(data, config_path)
# get a list of subnets, accounts, regions, and beacons
subnets = get_subnet_beacons()
# use threading to update all beacons with new beacon lists
results = []
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
p = os.environ['Prefix']
for beacon, data in subnets.items():
futures.append(executor.submit(
aws_invoke_lambda,
arn=f"arn:aws:lambda:{data['region']}:{data['account']}:function:{p}carve-{data['subnet']}",
payload={
'action': 'update',
'beacon': beacon,
'beacons': ','.join(all_beacons)
},
region=data['region'],
credentials=None))
for future in concurrent.futures.as_completed(futures):
results.append(future.result())
print(results)
# # copy config file to all required regions for CloudFormation includes
# prefix = os.environ['Prefix']
# org = os.environ['OrgId']
# for r in regions:
# aws_copy_s3_object(
# key=config_path,
# target_key=config_path,
# source_bucket=os.environ['CarveS3Bucket'],
# target_bucket=f"{prefix}carve-managed-bucket-{org}-{r}")
# # update all VPC stacks
# deploy_key = get_deploy_key(last=True)
# if deploy_key is not None:
# start_carve_deployment(event, context, key=deploy_key)
# else:
# print('No previous deploy key to run updates with')
def get_beacons_thread(asg, account, region):
# threaded lookup of all beacon IP addresses in an ASG
credentials = aws_assume_role(carve_role_arn(account), f"lookup-{asg}")
instance_ids = []
asg_info = aws_describe_asg(asg, region, credentials)
for instance in asg_info['Instances']:
if instance['LifecycleState'] == "InService":
instance_ids.append(instance['InstanceId'])
instances = aws_describe_instances(instance_ids, region, credentials)
beacons = {}
for instance in instances:
beacons[instance['SubnetId']] = instance['PrivateIpAddress']
return beacons
def ssm_event(event, context):
ssm_param = event['detail']['name']
ssm_value = aws_ssm_get_parameter(ssm_param)
if ssm_param.split('/')[-1] == 'scale':
scale_beacons(ssm_value)
elif ssm_param.split('/')[-1] == 'status':
# should enable/disable continuous verification
pass
def cleanup_ssm():
# make function to clean up SSM tokens
# move function to cleanup workflow
pass
def asg_event(event):
# should only be one item, but treat as a list
for record in event['Records']:
message = json.loads(record['Sns']['Message'])
print(f"TRIGGERED by ASG: {message['detail']['AutoScalingGroupName']}")
# get insances from event data
instance_id = ""
for resource in message['resources']:
if resource.startswith("arn:aws:ec2"):
instance_id = resource.split('/')[1]
vpc = message['detail']['AutoScalingGroupName'].split(f"{os.environ['Prefix']}carve-beacon-asg-")[-1]
credentials = aws_assume_role(carve_role_arn(message['account']), f"event-{message['detail']['AutoScalingGroupName']}")
# get instance metadata from account and update SSM
ec2 = aws_describe_instances([instance_id], message['region'], credentials)[0]
# print(ec2)
# parameter = f"/{os.environ['Prefix']}carve-resources/vpc-beacons/{vpc}/{ec2['InstanceId']}"
if 'EC2 Instance Launch Successful' == message['detail-type']:
# # add to SSM
# print(f"adding beacon to ssm: {instance_id} - {ec2['PrivateIpAddress']} - {ec2['SubnetId']}")
# beacon = {ec2['PrivateIpAddress']: ec2['SubnetId']}
# aws_ssm_put_parameter(parameter, json.dumps(beacon))
### need to update this code to grab subnet ssm param instead of ASG
# append azid code to end of instance name
subnet = aws_describe_subnets(message['region'], credentials, message['account'], ec2['SubnetId'])[0]
az = subnet['AvailabilityZoneId'].split('-')[-1]
name = f"{os.environ['Prefix']}carve-beacon-{ec2['SubnetId']}-{az}"
tags = [{'Key': 'Name', 'Value': name}]
aws_create_ec2_tag(ec2['InstanceId'], tags, message['region'], credentials)
function = f"arn:aws:lambda:{message['region']}:{message['account']}:function:{os.environ['Prefix']}carve-{ec2['SubnetId']}"
beacon = ec2['PrivateIpAddress']
## will need to update SSM logic for tokens to be 1 token per subnet that will come back up?
## or do we check the whole ASG for health?
i = 0
while True:
result = beacon_results(function, beacon)
print(result)
if result['health'] == 'up':
# ssm_param = f"/{os.environ['Prefix']}carve-resources/tokens/{asg}",
ssm_param = f"/{os.environ['Prefix']}carve-resources/tokens/{ec2['SubnetId']}"
token = aws_ssm_get_parameter(ssm_param)
aws_ssm_delete_parameter(ssm_param)
if token is not None:
aws_send_task_success(token, {"action": "scale", "result": "success"})
else:
print(f"taskToken was None for {ec2['SubnetId']}")
break
else:
if i > 30:
print(f'timed out waiting for beacon {beacon}')
break
else:
print(f'waiting for beacon {beacon} - {i}')
i = i + 1
time.sleep(1)
elif 'EC2 Instance Terminate Successful' == message['detail-type']:
# ssm_param = f"/{os.environ['Prefix']}carve-resources/tokens/{asg}",
subnet = message['detail']['Details']['Subnet ID']
ssm_param = f"/{os.environ['Prefix']}carve-resources/tokens/{subnet}"
token = aws_ssm_get_parameter(ssm_param)
aws_ssm_delete_parameter(ssm_param)
if token is not None:
aws_send_task_success(token, {"action": "scale", "result": "success"})
else:
print(f'taskToken was None for {subnet}')
print(f"beacon terminated {message}")
def beacon_results(function, beacon):
region = function.split(':')[3]
subnet = function.split(':')[-1]
print(f"getting beacon results from {subnet}")
payload = {
'action': 'results',
'beacon': beacon
}
result = aws_invoke_lambda(
arn=function,
payload=payload,
region=region,
credentials=None
)
return result
def carve_role_arn(account):
# return the carve IAM role ARN for any account number
role_name = f"{os.environ['Prefix']}carve-core"
role = f"arn:aws:iam::{account}:role/{role_name}"
return role
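# Illustrative (assuming the Prefix environment variable is 'c-'):
# carve_role_arn('123456789012') -> 'arn:aws:iam::123456789012:role/c-carve-core'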
def network_diff(A, B):
# compare peering both directions
diff_peering(A, B)
diff_vpcs(A, B)
def diff_peering(A, B, repeat=True):
for edge in A.edges() - B.edges():
print(f"DIFFERENCE DETECTED! \'{A.graph['Name']}\' contains a PEERING CONNECTION that \'{B.graph['Name']}\' does not:")
print(f"#######################")
print(A.nodes().data()[edge[0]])
print(f"-------peered to-------")
print(A.nodes().data()[edge[1]])
print(f"#######################")
if repeat:
diff_peering(B, A, repeat=False)
def diff_vpcs(A, B, repeat=True):
for node in A.nodes() - B.nodes():
print(f"DIFF DETECTED! \'{A.graph['Name']}\' contains a VPC that \'{B.graph['Name']}\' does not:")
print(f"#######################")
print(A.nodes().data()[node])
print(f"#######################")
if repeat:
diff_vpcs(B, A, repeat=False)
def export_visual(Graph, c_context):
G = Graph
# remove isolated nodes from graph
if 'peers_only' in c_context:
if c_context['peers_only'] == 'true':
G.remove_nodes_from(list(nx.isolates(G)))
print('drawing graph diagram')
# print(f"/src/c_graphic_{G.graph['Name']}.png")
options = {
'node_color': 'blue',
'node_size': 100,
'font_size': 14,
'width': 3,
'with_labels': True,
}
plt.figure(G.graph['Name'],figsize=(24,24))
nx.draw_circular(G, **options)
# G = nx.cycle_graph(80)
# pos = nx.circular_layout(G)
# # default
# plt.figure(1)
# nx.draw(G,pos)
# # smaller nodes and fonts
# plt.figure(2)
# nx.draw(G,pos,node_size=60,font_size=8)
# # larger figure size
# plt.figure(3,figsize=(12,12))
# nx.draw(G,pos)
plt.savefig(f"/src/c_graphic_{G.graph['Name']}.png")
def draw_vpc(Graph, vpc):
G = Graph
print('drawing graph diagram')
print(f"/src/c_graphic_{vpc}.png")
# remove all edges without vpc
for edge in G.edges:
if vpc not in edge:
G.remove_edge(edge[0], edge[1])
# remove all nodes left without edges
G.remove_nodes_from(list(nx.isolates(G)))
options = {
'node_color': 'blue',
'node_size': 100,
'font_size': 14,
'width': 3,
'with_labels': True,
}
plt.figure(vpc,figsize=(24,24))
# nx.draw_circular(G, **options)
# nx.draw_networkx(G, **options) # good for single
# nx.draw_spectral(G, **options)
# nx.draw_spring(G, **options) # similar to netoworkx also good
nx.draw_shell(G, **options)
plt.savefig(f"/src/c_graphic_{vpc}.png")
def load_graph(graph, local=True):
try:
if local:
with open(graph) as f:
G = json_graph.node_link_graph(json.load(f))
G.graph['Name'] = graph.split('/')[-1].split('.')[0]
return G
else:
graph_data = aws_read_s3_direct(graph, current_region)
G = json_graph.node_link_graph(json.loads(graph_data))
return G
except Exception as e:
print(f'error opening graph {graph}: {e}')
sys.exit()
def save_graph(G, file_path):
# save json data
try:
os.remove(file_path)
except:
pass
with open(file_path, 'a') as f:
json.dump(json_graph.node_link_data(G), f)
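# Hedged usage sketch for save_graph()/load_graph() (node_link JSON round trip;
# the file path and node names are hypothetical):
# G = nx.Graph(Name='example')
# G.add_edge('subnet-a', 'subnet-b')
# save_graph(G, '/tmp/example_graph.json')
# G2 = load_graph('/tmp/example_graph.json', local=True)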
# def main(c_context):
# # either load graph data for G from json, or generate dynamically
# if 'json_graph' in c_context:
# G = load_graph(c_context['json_graph'])
# else:
# G = False
# if not G:
# G = discovery(c_context)
# if 'export_visual' in c_context:
# if c_context['export_visual'] == 'true':
# export_visual(G, c_context)
# if 'diff_graph' in c_context:
# D = load_graph(c_context['diff_graph'])
# if D:
# network_diff(G, D)
# else:
# print(f'cannot compare: diff_graph did not load')
# draw_vpc(G, c_context['VpcId'])
|
the-stack_0_8422 | import sys, csv, os, string, re, shutil
# @function DATE FUNCTIONS
# @version v0.18.04.30
##################################
def dtos(dt=''):
if (len(dt) == 10):
ano = dt[6]+dt[7]+dt[8]+dt[9]
mes = dt[3]+dt[4]
dia = dt[0]+dt[1]
data = ano+"-"+mes+"-"+dia
sr = data
else:
sr = '0000-00-00'
return sr
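# Illustrative (hypothetical input): dtos() converts a dd/mm/yyyy string to
# ISO yyyy-mm-dd, e.g. dtos('30/04/2018') -> '2018-04-30'; any value that is
# not exactly 10 characters long falls back to '0000-00-00'.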
# @function NAME STANDARDIZATION
# @version v0.18.04.30
##################################
##################################
def nbr_title(title=''):
sr = ''
uc = 1
title = title.lower()
for x in range(0, len(title)):
if len(title) > 0:
t = title[x]
if uc == 1:
if not t.isupper():
sr = sr + t.upper()
else:
sr = sr + t
else:
sr = sr + t
uc = 0
if t == '.':
uc = 1
return sr
def nbr_name(name=''):
uc = 1
sr = ''
name = name.replace('.', '. ')
for x in range(0, len(name)):
# Character ############################
s = name[x]
if uc == 1:
if not s.isupper():
sr = sr + s.upper()
else:
sr = sr + s
uc = 0
else:
sr = sr + s.lower()
uc = 0
if s == ' ' or s == '.':
uc = 1
# Rules ##################################
sr = sr.replace(' ', ' ')
sr = sr.replace(' E ', ' e ')
sr = sr.replace(' De ', ' de ')
sr = sr.replace(' Do ', ' do ')
sr = sr.replace(' Dos ', ' dos ')
sr = sr.replace(' Da ', ' da ')
sr = sr.replace(' Das ', ' das ')
sr = sr.replace(' Em ', ' em ')
sr = sr.replace(' O ', ' o ')
return sr
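# Illustrative (hypothetical input): nbr_name() title-cases each word and then
# lower-cases the listed Portuguese connectives, e.g.
# nbr_name('RAUL PILLA DE OLIVEIRA') -> 'Raul Pilla de Oliveira'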
with open('U:/Excel-Metadados/pilla_acervodoc.csv', newline='') as csvfile:
handle = '2050011959'
license = 'license.txt'
ast = ''
spamreader = csv.reader(csvfile, delimiter=';')
for row in spamreader:
hd = row[2]
while len(hd) < 5:
hd = '0' + hd
hd = '300' + hd
directory = 'pilla_raul/' + hd
########################### HANDLE
handle_nr = handle + '/' + hd
########################### ID
id = row[2]
idf = id
while (len(idf) < 4):
idf = '0' + idf
########################### ABSTRACT
abstract = row[11]
abstract = re.sub('\r\n', '; ', abstract)
title = nbr_name(abstract)
abstract = 'De: '+row[7]+'\r\rPara: '+row[8]+'\n\rData: '+row[6]+'\n\rDescrição: '+abstract
tl = title.split('.')
if len(tl) > 0:
title = tl[0]
title = title + ';'
tl = title.split(';')
if len(tl) > 0:
title = tl[0]
########################### SUBJECT
t = row[10]
t = re.sub('\r\n', ';', t)
t = re.sub('; ', ';', t)
t = t.split(';')
subj = '';
for tt in t:
tt.split()
if len(tt) > 1:
tt.strip()
tt.rstrip();
tt.lstrip();
if len(tt) > 2:
subj = subj + tt + ';'
ast = ast + 'insert into pilla (r_arq, r_nrdoc, r_nrdoc2, r_doc, r_local, r_dtdoc, r_remetente, r_destinatario, r_descricao, r_assunto, r_n1, r_data2, r_isd, r_rmes) '
ast = ast + ' values '
row7 = ''
row8 = ''
if len(row[7]) > 3:
row7 = "" + nbr_name(row[7]) + ""
if len(row[8]) > 3:
row8 = "" + nbr_name(row[8]) + ""
ast = ast + "("
ast = ast + "'"+row[1]+"', "
ast = ast + "'"+row[2]+"', "
ast = ast + "'"+row[3]+"', "
ast = ast + "'"+row[4]+"', "
ast = ast + "'"+row[5]+"', "
#ast = ast + "'"+row[6]+"', "
ast = ast + "'"+row8+"', "
ast = ast + "'"+row7+"', "
ast = ast + "'"+row[9]+"', "
ast = ast + "'"+row[10]+"', "
ast = ast + "'"+row[11]+"', "
ast = ast + "'"+row[12]+"', "
ast = ast + "'"+dtos(row[13])+"', "
ast = ast + "'"+row[14]+"', "
ast = ast + "'"+row[15]+"' "
ast = ast + ");"
ast = ast + '\r\n'
######################### Bundle
arq = 'd:/lixo/n.sql'
ok = 0;
######################### DUBLIC CORE
fdc = open(arq, 'w')
fdc.write(ast)
fdc.close()
|
the-stack_0_8423 | # -*- coding: utf-8 -*-
"""Repair command tests"""
from __future__ import unicode_literals
from django.core import management
from modoboa.lib.permissions import ObjectAccess, get_object_owner
from modoboa.lib.tests import ModoTestCase
from .. import factories, models
class RepairTestCase(ModoTestCase):
"""TestCase for repair command."""
@classmethod
def setUpTestData(cls): # NOQA:N802
"""Create some data."""
super(RepairTestCase, cls).setUpTestData()
factories.populate_database()
def test_management_command(self):
"""Check that command works fine."""
ObjectAccess.objects.all().delete()
mbox = models.Mailbox.objects.first()
alias = models.Alias.objects.first()
# assert mbox has no owner
self.assertIs(get_object_owner(mbox), None)
# fix it; run in quiet mode because we don't want output in tests
ret = management.call_command("modo", "repair", "--quiet")
assert ret is None
# assert it's fixed
self.assertIsNot(get_object_owner(mbox), None)
self.assertIsNot(get_object_owner(alias), None)
def test_management_command_with_dry_run(self):
"""Check that command works fine."""
ObjectAccess.objects.all().delete()
mbox = models.Mailbox.objects.first()
# assert mbox has no owner
self.assertIs(get_object_owner(mbox), None)
# show problems; run in quiet mode because we don't want output in tests
ret = management.call_command("modo", "repair", "--quiet", "--dry-run")
assert ret is None
# assert its not fixed
self.assertIs(get_object_owner(mbox), None)
def test_management_command_with_nul_domain(self):
"""Just assume nothing raise when an alias has no domain."""
models.Alias.objects.create(address="@modoboa.xxx")
ret = management.call_command("modo", "repair", "--quiet")
assert ret is None
def test_management_command_with_no_alias(self):
"""Check that problem is fixed."""
count, detail = models.Alias.objects.filter(
address="[email protected]", internal=True).delete()
self.assertEqual(count, 3)
ret = management.call_command("modo", "repair", "--quiet")
assert ret is None
self.assertTrue(
models.Alias.objects.filter(
address="[email protected]", internal=True).exists())
|
the-stack_0_8425 | """
Support for Modbus Coil sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.modbus/
"""
import logging
import voluptuous as vol
from homeassistant.components import modbus
from homeassistant.const import CONF_NAME, CONF_SLAVE
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.helpers import config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['modbus']
CONF_COIL = 'coil'
CONF_COILS = 'coils'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COILS): [{
vol.Required(CONF_COIL): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_SLAVE): cv.positive_int
}]
})
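# Illustrative configuration.yaml entry matching the schema above (values are
# made up; 'slave' is optional):
#
# binary_sensor:
#   - platform: modbus
#     coils:
#       - name: Sensor1
#         slave: 1
#         coil: 100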
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Modbus binary sensors."""
sensors = []
for coil in config.get(CONF_COILS):
sensors.append(ModbusCoilSensor(
coil.get(CONF_NAME),
coil.get(CONF_SLAVE),
coil.get(CONF_COIL)))
add_devices(sensors)
class ModbusCoilSensor(BinarySensorDevice):
"""Modbus coil sensor."""
def __init__(self, name, slave, coil):
"""Initialize the modbus coil sensor."""
self._name = name
self._slave = int(slave) if slave else None
self._coil = int(coil)
self._value = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._value
def update(self):
"""Update the state of the sensor."""
result = modbus.HUB.read_coils(self._slave, self._coil, 1)
try:
self._value = result.bits[0]
except AttributeError:
_LOGGER.error(
'No response from modbus slave %s coil %s',
self._slave,
self._coil)
|
the-stack_0_8426 | #!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Start apollo data recorder.
It lists all available disks mounted under /media and prioritizes them in this order:
- Disk#1. Largest NVME disk
- Disk#2. Smaller NVME disk
- ...
- Disk#x. Largest Non-NVME disk
- Disk#y. Smaller Non-NVME disk
- ...
Run with '--help' to see more options.
"""
import argparse
import datetime
import os
import subprocess
import sys
import psutil
MAP_COLLECTION_DATA_TOPICS = [
'/apollo/monitor/system_status',
'/apollo/sensor/gnss/best_pose',
'/apollo/sensor/gnss/gnss_status',
'/apollo/sensor/gnss/imu',
'/apollo/sensor/gnss/ins_stat',
'/apollo/sensor/gnss/odometry',
'/apollo/sensor/gnss/raw_data',
'/tf',
'/tf_static',
'/apollo/sensor/camera/front_12mm/image/compressed',
'/apollo/sensor/camera/front_6mm/image/compressed',
'/apollo/sensor/lidar16/front/up/Scan',
'/apollo/sensor/lidar16/front/up/compensator/PointCloud2',
'/apollo/sensor/lidar128/Scan',
'/apollo/sensor/lidar128/compensator/PointCloud2',
]
def shell_cmd(cmd, alert_on_failure=True):
"""Execute shell command and return (ret-code, stdout, stderr)."""
print('SHELL > {}'.format(cmd))
proc = subprocess.Popen(cmd, shell=True, close_fds=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = proc.wait()
stdout = proc.stdout.read().decode('utf-8') if proc.stdout else None
stderr = proc.stderr.read().decode('utf-8') if proc.stderr else None
if alert_on_failure and stderr and ret != 0:
sys.stderr.write('{}\n'.format(stderr))
return (ret, stdout, stderr)
class ArgManager(object):
"""Arguments manager."""
def __init__(self):
self.parser = argparse.ArgumentParser(
description="Manage apollo data recording.")
self.parser.add_argument('--start', default=False, action="store_true",
help='Start recorder. It is the default '
'action if no other actions are triggered. In '
'that case, the False value is ignored.')
self.parser.add_argument('--stop', default=False, action="store_true",
help='Stop recorder.')
self.parser.add_argument('--split_duration', default="1m",
help='Duration to split bags, will be applied '
'as parameter to "rosbag record --duration".')
self._args = None
def args(self):
"""Get parsed args."""
if self._args is None:
self._args = self.parser.parse_args()
return self._args
class DiskManager(object):
"""Disk manager."""
def __init__(self):
"""Manage disks."""
disks = []
for disk in psutil.disk_partitions():
if not disk.mountpoint.startswith('/media/'):
continue
disks.append({
'mountpoint': disk.mountpoint,
'available_size': DiskManager.disk_avail_size(disk.mountpoint),
'is_nvme': disk.mountpoint.startswith('/media/apollo/internal_nvme'),
})
# Prefer NVME disks and then larger disks.
self.disks = sorted(
disks, reverse=True,
key=lambda disk: (disk['is_nvme'], disk['available_size']))
@staticmethod
def disk_avail_size(disk_path):
"""Get disk available size."""
statvfs = os.statvfs(disk_path)
return statvfs.f_frsize * statvfs.f_bavail
class Recorder(object):
"""Data recorder."""
def __init__(self, args):
self.args = args
self.disk_manager = DiskManager()
def start(self):
"""Start recording."""
if Recorder.is_running():
print('Another data recorder is running, skip.')
return
disks = self.disk_manager.disks
# Use the best disk, or fallback '/apollo' if none available.
disk_to_use = disks[0]['mountpoint'] if len(disks) > 0 else '/apollo'
topics = list(MAP_COLLECTION_DATA_TOPICS)
self.record_task(disk_to_use, topics)
def stop(self):
"""Stop recording."""
shell_cmd('pkill -f "cyber_recorder record"')
def record_task(self, disk, topics):
"""Record tasks into the <disk>/data/bag/<task_id> directory."""
task_id = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
task_dir = os.path.join(disk, 'data/bag', task_id)
print('Recording bag to {}'.format(task_dir))
log_file = '/apollo/data/log/apollo_record.out'
topics_str = ' -c '.join(topics)
os.makedirs(task_dir)
cmd = '''
cd "{}"
source /apollo/scripts/apollo_base.sh
source /apollo/framework/install/setup.bash
nohup cyber_recorder record -c {} >{} 2>&1 &
'''.format(task_dir, topics_str, log_file)
shell_cmd(cmd)
@staticmethod
def is_running():
"""Test if the given process running."""
_, stdout, _ = shell_cmd('pgrep -c -f "cyber_recorder record"', False)
# If stdout is the pgrep command itself, no such process is running.
return stdout.strip() != '1' if stdout else False
def main():
"""Main entry."""
arg_manager = ArgManager()
args = arg_manager.args()
recorder = Recorder(args)
if args.stop:
recorder.stop()
else:
recorder.start()
if __name__ == '__main__':
main()
|
the-stack_0_8427 | import moviepy.editor as mpy
import argparse
import os
def parseArgs():
parser = argparse.ArgumentParser(
description='Edit video by picking intervals and highlight danmu')
parser.add_argument('vid_id', type=str,
help='the id for the video')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parseArgs()
vid_id = args.vid_id
print('Start preprocessing {}'.format(vid_id))
with open('cache/{}_intervals.txt'.format(vid_id)) as f:
intervals = eval(f.readline())
intervals_danmu = eval(f.readline())
ori_clip = mpy.VideoFileClip('cache/{}.mp4'.format(vid_id))
for i in intervals:
direc = os.listdir('cache/')
if '{}_clip_{}.mp4'.format(vid_id, i) in direc:
continue
ori_clip.subclip(i[0], i[1]).write_videofile(
'cache/{}_clip_{}.mp4'.format(vid_id, i))
    print('Video clips split, starting interactive editing')
clips = []
for i, d in list(zip(intervals,intervals_danmu)):
print('interval: {}\ndanmu:'.format(i))
print(d)
clip = mpy.VideoFileClip('cache/{}_clip_{}.mp4'.format(vid_id, i))
os.system('mpv \"cache/{}_clip_{}.mp4\" --loop'.format(vid_id, i))
shear = input('Give two time length to cut from head and tail> ')
if shear != '':
shear = list(map(int, shear.split()))
clip = clip.subclip(shear[0], clip.duration - shear[1])
picked = input('Pick danmu or press ENTER to abort this clip> ')
if picked == '':
continue
subtitle = (mpy.TextClip(picked, fontsize=40,
font='Microsoft-YaHei-UI-Bold',
color='white',
stroke_color='black',
stroke_width=2)
.margin(top=15, left=45, opacity=0)
.set_position(('left', 'top')))
clips.append(mpy.CompositeVideoClip([clip, subtitle])
.fadein(.3).set_duration(clip.duration).fadeout(.3))
out_clip = mpy.concatenate_videoclips(clips)
out_clip.write_videofile('output/{}_fin.mp4'.format(vid_id))
print('Edit done!')
|
the-stack_0_8428 | import gym
import numpy as np
from gym.envs.registration import register
# Refer https://github.com/openai/gym/issues/565
register(
id='FrozenLakeNotSlippery-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4', 'is_slippery': False},
max_episode_steps=2000,
reward_threshold=0.78, # optimum = .8196
)
#env = gym.make("FrozenLakeNotSlippery-v0")
env = gym.make("FrozenLakeNotSlippery-v0")
env.seed(0)
np.random.seed(56776)
# Test how the game works.
print("-------------Test game--------------")
ql_table = np.zeros([env.observation_space.n, env.action_space.n])
print(ql_table)
env.render()
env.reset()
hardcore_steps = [1, 1, 2, 2, 1, 2]
for step in hardcore_steps:
env.step(step)
env.render()
# Let the machine learn the steps.
print("-------------Let machine learn the steps--------------")
env.reset()
env.render()
ql_table = np.zeros([env.observation_space.n, env.action_space.n]) + np.random.randn(16, 4)
print(ql_table)
"""
Hyper parameters:
"""
n_round = 5000
n_steps = 2000
lr = 0.3
discount = 0.8
for round in range(n_round):
state = env.reset()
for step in range(n_steps):
action = np.argmax(ql_table[state, :] + np.random.randn(1, 4))
new_state, reward, done, _ = env.step(action)
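        # Q-learning update: Q(s, a) <- (1 - lr) * Q(s, a) + lr * (reward + discount * max_a' Q(s', a'))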
ql_table[state, action] = (1 - lr) * ql_table[state, action] + \
lr * (reward + discount * np.max(ql_table[new_state, :]))
state = new_state
if done is True:
break
print(np.argmax(ql_table, axis=1))
print(np.around(ql_table, 6))
env.reset()
for step in np.argmax(ql_table, axis=1):
state_new, reward, done, _ = env.step(step)
env.render() |
the-stack_0_8429 | """
Write a function that moves the first two elements of a list to the end of the list.
"""
numbers = [1, 2, 3, 4, 5]
def rotate(numbers):
numbers = [*numbers[2:], *numbers[0:2]]
return numbers
print(rotate(numbers)) |
the-stack_0_8435 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.dialogflow_v2beta1.types import environment
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class EnvironmentsTransport(abc.ABC):
"""Abstract transport class for Environments."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
def __init__(
self,
*,
host: str = "dialogflow.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: typing.Optional[str] = None,
scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
quota_project_id: typing.Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scope (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
credentials_file, scopes=scopes, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(
scopes=scopes, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_environments: gapic_v1.method.wrap_method(
self.list_environments, default_timeout=None, client_info=client_info,
),
}
@property
def list_environments(
self,
) -> typing.Callable[
[environment.ListEnvironmentsRequest],
typing.Union[
environment.ListEnvironmentsResponse,
typing.Awaitable[environment.ListEnvironmentsResponse],
],
]:
raise NotImplementedError()
__all__ = ("EnvironmentsTransport",)
|
the-stack_0_8436 | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for creating EstimatorSpecs for Onsets and Frames models."""
import functools
from magenta.models.onsets_frames_transcription import constants
from magenta.models.onsets_frames_transcription import drum_mappings
from magenta.models.onsets_frames_transcription import infer_util
from magenta.models.onsets_frames_transcription import metrics
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import tpu as contrib_tpu
def _drums_only_metric_ops(features, labels, frame_probs, onset_probs,
frame_predictions, onset_predictions,
offset_predictions, velocity_values, hparams):
"""Generate drum metrics: offsets/frames are ignored."""
del frame_predictions, offset_predictions # unused
metric_ops = metrics.define_metrics(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=onset_predictions,
onset_predictions=onset_predictions,
offset_predictions=onset_predictions,
velocity_values=velocity_values,
length=features.length,
sequence_label=labels.note_sequence,
frame_labels=labels.labels,
sequence_id=features.sequence_id,
hparams=hparams,
min_pitch=constants.MIN_MIDI_PITCH,
max_pitch=constants.MAX_MIDI_PITCH,
prefix='drums/',
onsets_only=True,
pitch_map=drum_mappings.GROOVE_PITCH_NAMES)
return metric_ops
def get_metrics(features, labels, frame_probs, onset_probs, frame_predictions,
onset_predictions, offset_predictions, velocity_values,
hparams):
"""Return metrics values ops."""
if hparams.drums_only:
return _drums_only_metric_ops(
features=features,
labels=labels,
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=frame_predictions,
onset_predictions=onset_predictions,
offset_predictions=offset_predictions,
velocity_values=velocity_values,
hparams=hparams)
else:
return metrics.define_metrics(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=frame_predictions,
onset_predictions=onset_predictions,
offset_predictions=offset_predictions,
velocity_values=velocity_values,
length=features.length,
sequence_label=labels.note_sequence,
frame_labels=labels.labels,
sequence_id=features.sequence_id,
hparams=hparams)
def _predict_sequences(frame_probs, onset_probs, frame_predictions,
onset_predictions, offset_predictions, velocity_values,
hparams):
"""Predict a batch of sequences."""
def predict_sequence(frame_probs, onset_probs, frame_predictions,
onset_predictions, offset_predictions, velocity_values,
hparams):
"""Predict a single sequence."""
if hparams.drums_only:
sequence_prediction = infer_util.predict_sequence(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=onset_predictions,
onset_predictions=onset_predictions,
offset_predictions=onset_predictions,
velocity_values=velocity_values,
min_pitch=constants.MIN_MIDI_PITCH,
hparams=hparams,
onsets_only=True)
for note in sequence_prediction.notes:
note.is_drum = True
else:
sequence_prediction = infer_util.predict_sequence(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=frame_predictions,
onset_predictions=onset_predictions,
offset_predictions=offset_predictions,
velocity_values=velocity_values,
min_pitch=constants.MIN_MIDI_PITCH,
hparams=hparams)
return sequence_prediction.SerializeToString()
sequences = []
for i in range(frame_predictions.shape[0]):
sequence = tf.py_func(
functools.partial(predict_sequence, hparams=hparams),
inp=[
frame_probs[i],
onset_probs[i],
frame_predictions[i],
onset_predictions[i],
offset_predictions[i],
velocity_values[i],
],
Tout=tf.string,
stateful=False)
sequence.set_shape([])
sequences.append(sequence)
return tf.stack(sequences)
def get_estimator_spec(hparams, mode, features, labels, frame_logits,
onset_logits, offset_logits, velocity_values,
offset_network=True):
"""Create TPUEstimatorSpec."""
loss_metrics = {}
loss = None
if (mode == tf.estimator.ModeKeys.TRAIN or
mode == tf.estimator.ModeKeys.EVAL):
onset_losses = tf.losses.sigmoid_cross_entropy(
labels.onsets[:, :, :constants.MIDI_PITCHES],
onset_logits[:, :, :constants.MIDI_PITCHES],
weights=tf.expand_dims(
tf.sequence_mask(
features.length, maxlen=tf.shape(labels.onsets)[1]),
axis=2))
loss_metrics['onset'] = onset_losses
if offset_network and not hparams.drums_only:
offset_losses = tf.losses.sigmoid_cross_entropy(
labels.offsets[:, :, :constants.MIDI_PITCHES],
offset_logits[:, :, :constants.MIDI_PITCHES],
weights=tf.expand_dims(
tf.sequence_mask(
features.length, maxlen=tf.shape(labels.offsets)[1]),
axis=2))
loss_metrics['offset'] = offset_losses
velocity_losses = tf.losses.mean_squared_error(
labels.velocities, velocity_values,
weights=labels.onsets * hparams.velocity_loss_weight)
loss_metrics['velocity'] = velocity_losses
if not hparams.drums_only:
frame_losses = tf.losses.sigmoid_cross_entropy(
labels.labels[:, :, :constants.MIDI_PITCHES],
frame_logits[:, :, :constants.MIDI_PITCHES],
weights=tf.expand_dims(
tf.sequence_mask(
features.length, maxlen=tf.shape(labels.labels)[1]),
axis=2))
loss_metrics['frame'] = frame_losses
loss = tf.losses.get_total_loss()
if (mode == tf.estimator.ModeKeys.EVAL or
mode == tf.estimator.ModeKeys.PREDICT):
frame_probs = tf.sigmoid(frame_logits)
onset_probs = tf.sigmoid(onset_logits)
if offset_network:
offset_probs = tf.sigmoid(offset_logits)
else:
offset_probs = tf.zeros_like(onset_probs)
frame_predictions = frame_probs > hparams.predict_frame_threshold
onset_predictions = onset_probs > hparams.predict_onset_threshold
offset_predictions = offset_probs > hparams.predict_offset_threshold
if hparams.drum_prediction_map:
map_predictions = functools.partial(
drum_mappings.map_pianoroll,
mapping_name=hparams.drum_prediction_map,
reduce_mode='any',
min_pitch=constants.MIN_MIDI_PITCH)
frame_predictions = tf.map_fn(map_predictions, frame_predictions)
onset_predictions = tf.map_fn(map_predictions, onset_predictions)
offset_predictions = tf.map_fn(map_predictions, offset_predictions)
map_values = functools.partial(
drum_mappings.map_pianoroll,
mapping_name=hparams.drum_prediction_map,
reduce_mode='max',
min_pitch=constants.MIN_MIDI_PITCH)
velocity_values = tf.map_fn(map_values, velocity_values)
metrics_values = get_metrics(features, labels, frame_probs, onset_probs,
frame_predictions, onset_predictions,
offset_predictions, velocity_values, hparams)
for label, loss_collection in loss_metrics.items():
loss_label = 'losses/' + label
metrics_values[loss_label] = loss_collection
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = contrib_layers.optimize_loss(
name='training',
loss=loss,
global_step=tf.train.get_or_create_global_step(),
learning_rate=hparams.learning_rate,
learning_rate_decay_fn=functools.partial(
tf.train.exponential_decay,
decay_steps=hparams.decay_steps,
decay_rate=hparams.decay_rate,
staircase=True),
clip_gradients=hparams.clip_norm,
summaries=[],
optimizer=lambda lr: contrib_tpu.CrossShardOptimizer( # pylint:disable=g-long-lambda
tf.train.AdamOptimizer(lr)))
return contrib_tpu.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
metric_ops = {k: tf.metrics.mean(v) for k, v in metrics_values.items()}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=metric_ops)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'frame_probs':
frame_probs,
'onset_probs':
onset_probs,
'frame_predictions':
frame_predictions,
'onset_predictions':
onset_predictions,
'offset_predictions':
offset_predictions,
'velocity_values':
velocity_values,
'sequence_predictions':
_predict_sequences(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=frame_predictions,
onset_predictions=onset_predictions,
offset_predictions=offset_predictions,
velocity_values=velocity_values,
hparams=hparams),
# Include some features and labels in output because Estimator 'predict'
# API does not give access to them.
'sequence_ids':
features.sequence_id,
'sequence_labels':
labels.note_sequence,
'frame_labels':
labels.labels,
'onset_labels':
labels.onsets,
}
for k, v in metrics_values.items():
predictions[k] = tf.stack(v)
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
else:
raise ValueError('Unsupported mode: %s' % mode)
|
the-stack_0_8437 | """
Metadata for morphology experiments.
"""
# Copyright 2018-2020 CNRS
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import inspect
from datetime import datetime
from .base import KGObject, KGQuery, cache, Field, Distribution
from .commons import QuantitativeValue, MorphologyType, BrainRegion, SomaType, ObjectiveType
from .core import Subject, Person, Protocol
from .minds import Dataset
from .utility import compact_uri, standard_context, as_list
from .experiment import Slice
from .electrophysiology import PatchedCell, PatchedSlice
from .optophysiology import Position
DEFAULT_NAMESPACE = "neuralactivity"
class LabeledCell(KGObject):
"""A labeled cell used in a morphology study."""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/labeledcell/v0.1.1"
type = ["nsg:LabeledCell", "prov:Entity"]
query_id = "fgModified"
query_id_resolved = "fgResolvedModified"
collection_class = "LabeledCellCollection"
experiment_class = "PatchClampExperiment"
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"brainRegion": "nsg:brainRegion",
"mType": "nsg:mType",
"position": "nsg:position",
"spatialCellName": "nsg:spatialCellName",
"reconstructionRequested": "nsg:reconstructionRequested",
"reconstructable": "nsg:reconstructable"
}
fields = (
Field("name", str, "name", required=True),
Field("project_name", str, "projectName"),
Field("brain_location", BrainRegion, "brainRegion", multiple=True),
Field("morphology_type", MorphologyType, "mType"), # specifies the coordinates of the location of the cell in the slice
Field("location_in_slice", Position, "position"), #change to 3Dvector
Field("spatial_cell_name", str, "spatialCellName"), # spatial cell name given during the annotation process
Field("reconstruction_requested", bool, "reconstructionRequested"), # indicates if reconstruction the cell has been requested or not
Field("reconstructable", bool, "reconstructable"), #indicates if the cell can be reconstructed or not
Field("patched_cell", PatchedCell, "wasRevisionOf"),
Field("collection", "morphology.LabeledCellCollection", "^prov:hadMember",
reverse="labeled_cell") #chance reverse when labeledcellcollationmade
)
def __init__(self, name, project_name, brain_location, morphology_type=None,
location_in_slice=None, spatial_cell_name=None, reconstruction_requested=None,
reconstructable=None, patched_cell=None, collection=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class LabeledCellCollection(KGObject):
"""A collection of labeled cells."""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/labeledcellcollection/v0.1.1"
type = ["nsg:Collection"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"size": "schema:size",
"hadMember": "prov:hadMember"
}
fields = (
Field("name", str, "name", required=True),
Field("cells", LabeledCell, "hadMember", required=True, multiple=True),
Field("slice", "morphology.AnnotatedSlice", "^nsg:hasPart", reverse="recorded_cells") # chcek reverse
)
    def __init__(self, name, cells, slice=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
@property
def size(self):
return len(self.cells)
class FixedStainedSlice(KGObject):
"""An fixed, stained slice from a morphology experiment."""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/fixedstainedslice/v0.1.1/"
type = ["nsg:FixedStainedSlice", "prov:Entity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"dcterms": "http://purl.org/dc/terms/",
"name": "schema:name",
"wasRevisionOf": "prov:wasRevisionOf"
}
fields = (
Field("name", str, "name", required=True),
Field("patched_slice", PatchedSlice, "wasRevisionOf")
)
def __init__(self, name, patched_slice=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class AnnotatedSlice(KGObject):
"""An annotated slice from a morphology experiment."""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/annotatedslice/v0.1.1/"
type = ["nsg:AnnotatedSlice", "prov:Entity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"dcterms": "http://purl.org/dc/terms/",
"name": "schema:name",
"annotationAngle": "nsg:annotationAngle",
"annotatorComment": "nsg:annotatorComment",
"hasPart": "schema:hasPart",
"wasRevisionOf": "prov:wasRevisionOf"
}
fields = (
Field("name", str, "name", required=True),
Field("annotation_angle", QuantitativeValue, "annotationAngle"),
Field("annotator_comment", str, "annotatorComment"),
Field("cell_collection", LabeledCellCollection, "hasPart"),
Field("fixed_stained_slice", FixedStainedSlice, "wasRevisionOf")
)
def __init__(self, name, annotation_angle=None, annotator_comment=None,
cell_collection=None, fixed_stained_slice=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class ReconstructedCell(KGObject):
"""A reconstructed cell."""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/reconstructedcell/v0.1.4"
type = ["nsg:ReconstructedCell", "prov:Entity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"brainLocation": "nsg:brainLocation",
"mType": "nsg:mType",
"somaType": "nsg:somaType"
}
fields = (
Field("name", str, "name", required=True),
Field("soma_brain_location", BrainRegion, "brainLocation", multiple=True),
Field("axon_projection", BrainRegion, "brainRegion", multiple=True),
Field("morphology_type", MorphologyType, "mType"),
Field("soma_type", SomaType, "somaType")
)
def __init__(self, name, soma_brain_location=None, axon_projection=None, morphology_type=None,
soma_type=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class FixationStainingMounting(KGObject):
"""Fixing, Staining and Mounting activities description"""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/fixationstainingmounting/v0.1.1"
type = ["nsg:FixationStainingMounting", "prov:Activity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"fixationMethod": "nsg:fixationMethod",
"stain": "nsg:stain",
"mountingMedia": "nsg:mountingMedia",
"used": "prov:used",
"generated": "prov:generated"
}
fields = (
Field("name", str, "name", required=True),
Field("fixation_method", str, "fixationMethod"),
Field("stain", str, "stain"),
Field("mounting_media", str, "mountingMedia"),
Field("slice_used", Slice, "used"),
Field("slice_generated", FixedStainedSlice, "generated")
)
def __init__(self, name, fixation_method=None, stain=None, mounting_media=None,
slice_used=None, slice_generated=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class AcquisitionAnnotation(KGObject):
"""Acquisition and annotation activity"""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/acquisitionannotation/v0.1.1"
type = ["nsg:AcquisitionAnnotation", "prov:Activity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"objectiveMagnification": "nsg:objectiveMagnification",
"used": "prov:used",
"generated": "prov:generated",
}
fields = (
Field("name", str, "name", required=True),
Field("objective_magnification", str, "objectiveMagnification"),
Field("fixed_stained_slice", FixedStainedSlice, "used"),
Field("annotated_slice", AnnotatedSlice, "generated")
)
def __init__(self, name, objective_magnification=None, fixed_stained_slice=None,
annotated_slice=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class Reconstruction(KGObject):
"""Reconstruction activity"""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/reconstruction/v0.1.2"
type = ["nsg:Reconstruction", "prov:Activity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"objectiveType": "nsg:objectiveType",
"objectiveMagnification": "nsg:objectiveMagnification",
"compressionCorrection": "nsg:compressionCorrection",
"used": "prov:used",
"generated": "prov:generated",
}
fields = (
Field("name", str, "name", required=True),
Field("objective_type", ObjectiveType, "objectiveType"),
Field("objective_magnification", str, "objectiveMagnification"),
Field("compression_correction", str, "compressionCorrection"),
Field("labeled_cell", LabeledCell, "used"),
Field("reconstructed_cell", ReconstructedCell, "generated")
)
def __init__(self, name, objective_type=None, compression_correction=None, labeled_cell=None,
reconstructed_cell=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
|
the-stack_0_8439 | # Copyright 2019 SCHUFA Holding AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class TreeNode:
"""
A helper class to store the tree structure of a model tree.
    Do not instantiate this class directly, but use the model tree classes
Parameters
----------
depth : int, (default=0)
Zero-based depth of the node in the tree
estimator : object
Base estimator of the node.
This estimator is used in leaf nodes for predictions, but can also be stored in other nodes.
children : list or None
List of child nodes. Should have 2 or 0 elements or be None.
split : Split
Defines, how samples are split (and mapped) to the child nodes.
Attributes
----------
depth : int, (default=0)
Zero-based depth of the node in the tree
estimator : object
Base estimator of the node.
This estimator is used in leaf nodes for predictions, but can also be stored in other nodes.
children : list or None
List of child nodes. Should have 2 or 0 elements or be None.
split : Split
Defines, how samples are split (and mapped) to the child nodes.
See Also
--------
modeltrees.tree.BaseModelTree : Base Model Tree implementation
    Split : Class that defines the split / mapping to the child nodes
Notes
-----
This is not a sklearn estimator class, but a helper class
"""
def __init__(self, depth=0, estimator=None, children=None, split=None):
self.depth = depth
self.estimator = estimator
self.children = children
self.split = split
def is_leaf(self):
"""
Checks, if the node is a leaf node, i.e. no split is set.
Returns
-------
True, if the node is a leaf node.
"""
return self.split is None
def map_to_leaf(self, X):
"""
Maps input samples to leaf nodes by using split rules and the subtree structure
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input Features of the samples
Returns
-------
leaf_idx: array-like, shape = [n_samples]
For each sample an index of the corresponding leaf node.
leafs: list
A list of leaf nodes. Positions correspond to the indices in `leaf_idx`
"""
if self.is_leaf():
return np.zeros(np.shape(X)[0], dtype=int), [self]
else:
child_idx = self.split.map_to_children(X)
leaf_idx = -np.ones(child_idx.shape, dtype=int)
leafs = []
# Iterate over children
for c in range(len(self.children)):
# Get sample subset for child c
idx = child_idx == c
if np.any(idx):
# Recursively map to leafs
leaf_idx_, leafs_ = self.children[c].map_to_leaf(X[idx])
# Include results into output leaf_idx
                    # Note that we need to shift the index to avoid returning the same leaf index for different leaves.
shift = len(leafs)
leaf_idx[idx] = leaf_idx_ + shift
# Append the new found leafs
leafs = leafs + leafs_
# Return results
return leaf_idx, leafs
class Split:
"""
Defines a splitting of a decision / model tree node, i.e. the mapping of samples to the child node.
This class supports splits based on one feature and threshold.
All samples with a feature value (in the given feature) less or equal to the threshold are mapped to child 0.
All others are mapped to child 1.
Parameters
----------
split_feature : int
Index of the feature that is used for the split
split_threshold : int
Threshold for the split.
Attributes
----------
split_feature : int
Index of the feature that is used for the split
split_threshold : int
Threshold for the split.
"""
def __init__(self, split_feature, split_threshold):
self.split_feature = split_feature
self.split_threshold = split_threshold
def _apply_split(self, X, y = None):
"""
        Splits a set of samples according to the defined split rule.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input Features of the samples
y : array-like, shape = [n_samples] or [n_samples, n_outputs], optional
Target variable.
Returns
-------
subsets: list
A list of Subsets. If `y` is `None`, each element `i` is an array with [n_samples[i], n_features].
Otherwise each element is a pair of input features and target variable.
"""
# Check for left subtree
split_filter = X[:, self.split_feature] <= self.split_threshold
# Output depending in input
if y is None:
return [X[split_filter], X[~split_filter]]
else:
return [
(X[split_filter], y[split_filter]), # Samples for the left subtree
(X[~split_filter], y[~split_filter]) # Samples for the right subtree
]
def map_to_children(self, X):
"""
Maps samples to child nodes. This is done based on the split feature and threshold
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input Features of the samples
Returns
-------
child_idx: array-like, shape = [n_samples]
For each sample an index (0 for left child, 1 for right child).
"""
child_idx = 1 - (X[:, self.split_feature] <= self.split_threshold)
return child_idx
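if __name__ == "__main__":
    # Minimal illustration, not part of the original module: build a stub tree
    # with a single split on feature 0 and map a few samples to its leaves.
    left, right = TreeNode(depth=1), TreeNode(depth=1)
    root = TreeNode(depth=0, children=[left, right],
                    split=Split(split_feature=0, split_threshold=0.5))
    X = np.array([[0.2, 1.0], [0.7, -1.0], [0.4, 3.0]])
    leaf_idx, leafs = root.map_to_leaf(X)
    # Rows 0 and 2 are <= the threshold (left leaf), row 1 goes right.
    print(leaf_idx, [leaf.depth for leaf in leafs])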
|
the-stack_0_8442 | # -*- coding: utf-8 -*-
"""
pytest_instafail
~~~~~~~~~~~~~~~~
py.test plugin to show failures instantly.
:copyright: (c) 2013-2016 by Janne Vanhala.
:license: BSD, see LICENSE for more details.
"""
import pytest
from _pytest.terminal import TerminalReporter
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption(
'--instafail', action="store_true", dest="instafail", default=False,
help=(
"show failures and errors instantly as they occur (disabled by "
"default)."
)
)
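# With the option registered above, the plugin is enabled from the command
# line, e.g. `pytest --instafail`.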
@pytest.mark.trylast
def pytest_configure(config):
if hasattr(config, 'slaveinput'):
return # xdist slave, we are already active on the master
if config.option.instafail and config.pluginmanager.hasplugin('terminalreporter'):
# Get the standard terminal reporter plugin...
standard_reporter = config.pluginmanager.getplugin('terminalreporter')
instafail_reporter = InstafailingTerminalReporter(standard_reporter)
# ...and replace it with our own instafailing reporter.
config.pluginmanager.unregister(standard_reporter)
config.pluginmanager.register(instafail_reporter, 'terminalreporter')
class InstafailingTerminalReporter(TerminalReporter):
def __init__(self, reporter):
TerminalReporter.__init__(self, reporter.config)
self._tw = reporter._tw
def pytest_collectreport(self, report):
# Show errors occurred during the collection instantly.
TerminalReporter.pytest_collectreport(self, report)
if report.failed:
if self.isatty:
self.rewrite('') # erase the "collecting"/"collected" message
self.print_failure(report)
def pytest_runtest_logreport(self, report):
# Show failures and errors occuring during running a test
# instantly.
TerminalReporter.pytest_runtest_logreport(self, report)
if report.failed and not hasattr(report, 'wasxfail'):
if self.verbosity <= 0:
self._tw.line()
self.print_failure(report)
def summary_failures(self):
# Prevent failure summary from being shown since we already
        # show the failure instantly after failure has occurred.
pass
def summary_errors(self):
# Prevent error summary from being shown since we already
        # show the error instantly after error has occurred.
pass
def print_failure(self, report):
if self.config.option.tbstyle != "no":
if self.config.option.tbstyle == "line":
line = self._getcrashline(report)
self.write_line(line)
else:
msg = self._getfailureheadline(report)
# "when" was unset before pytest 4.2 for collection errors.
when = getattr(report, "when", "collect")
if when == "collect":
msg = "ERROR collecting " + msg
elif when == "setup":
msg = "ERROR at setup of " + msg
elif when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
if not self.config.getvalue("usepdb"):
self._outrep_summary(report)
|
the-stack_0_8448 | """Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 5
animation_ttl = range(0, 10)
input_str = event.pattern_match.group(1)
if input_str == "/call":
await event.edit(input_str)
animation_chars = [
"`Connecting To Telegram Headquarters...`",
"`User Authorised.`",
"`Private VOIP Call Connected...`",
"`Me Calling Pavel Durov Shukla....`",
"`Me: Hello Sir, Please Ban This Guys Telegram Account.`",
"`Durov: May I Know Who Is This?`",
"`Me: Yo Brah, I Am` @r4v4n4",
"`Durov: OMG!!! I Am FAN Of You Sir...\nI'll Make Sure That Guy Account Will Get Blocked Within 24Hrs.`",
"`Me: See You Later Brah.`",
"`Private VOIP Call Disconnected.`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
|
the-stack_0_8450 | #!/usr/bin/env python
"""
Determines the frequencies of residue pair contacts in molecular
dynamics simulations. Given one or more MDContact outputs, this
script determines the frequency of each unique interaction of the
form (itype, residue 1, residue2), weighted by number of frames,
across all inputs.
The inputs are one or more MDContact output file paths as well as an
output path. The user may also specify a subset of interaction types
to compute frequencies for. The user may additionally provide a label
file to convert residue labellings (typically for the use of aligning
sequences for performing frequency comparisons with other
trajectories).
The output is a single tsv file with each row indicating residue
id 1, residue id 2, and contact frequency.
"""
from __future__ import division
from collections import defaultdict
import sys
import argparse
def atomid_to_resid(atom):
return atom[0:atom.rfind(":")]
# return ':'.join(atom.split(':')[1:3])
def gen_counts(input_lines, interaction_types, residuelabels=None):
"""
Parse each line in `input_lines` as a line from MDContacts and return interaction-counts for each residue pair. If
`residuelabels` is defined it is used to modify residue identifiers and to filter out residues not indicated.
For example:
inputs = [
"# total_frames: 3",
"\t".join(["0", "hbbb", "A:ALA:1:N", "A:ARG:4:O"]),
"\t".join(["0", "vdw", "A:ALA:1:CB", "A:ARG:4:CA"]),
"\t".join(["1", "vdw", "A:ALA:1:N", "A:CYS:5:CA"]),
"\t".join(["2", "hbbb", "A:THR:2:N", "A:CYS:5:O"]),
"\t".join(["2", "hbss", "A:ALA:1:N", "A:CYS:5:O"])
]
labels = {"A:ALA:1": "A1", "A:ARG:4": "R4", "A:CYS:5": "C5"}
# Only consider hbbb and vdw, filter away THR, and map to single-letter labels
gen_counts(inputs, ["hbbb", "vdw"], labels)
# Returns: { ("A1", "R4"): 1, ("A1", "C5"): 1 }
Parameters
----------
input_lines: Iterable[str]
Interactions formatted as MDContacts output, e.g. ["0\thbbb\tA:ALA:1:N\tA:ARG:4:H", ...]
interaction_types: list of str
Which interaction types to consider
residuelabels: dict of (str: str)
Remaps and filters residuelabels, e.g. {"A:ARG:4": "R4"}
Returns
-------
(int, dict of (str, str): int)
Total frame-count and mapping of residue-residue interactions to frame-count
"""
# Maps residue pairs to set of frames in which they're present
rescontact_frames = defaultdict(set)
total_frames = 0
for line in input_lines:
line = line.strip()
if "total_frames" in line:
tokens = line.split(" ")
total_frames = int(tokens[1][tokens[1].find(":")+1:])
if len(line) == 0 or line[0] == "#":
continue
tokens = line.split("\t")
# Check that the interaction type is specified
itype = tokens[1]
if itype not in interaction_types:
continue
frame = int(tokens[0])
if frame + 1 > total_frames:
total_frames = frame + 1
res1 = atomid_to_resid(tokens[2])
res2 = atomid_to_resid(tokens[3])
# Change residue id according to `residuelabels` or skip if any of the residues are not present
if residuelabels is not None:
if res1 not in residuelabels or res2 not in residuelabels:
continue
res1 = residuelabels[res1]
res2 = residuelabels[res2]
# Ensure lexicographical order of residue names
if res2 < res1:
res1, res2 = res2, res1
rescontact_frames[(res1, res2)].add(frame)
    # Instead of returning the list of frames for each interaction, only return the number of frames
rescontact_counts = {(res1, res2): len(frames) for (res1, res2), frames in rescontact_frames.items()}
return total_frames, rescontact_counts
def parse_labelfile(label_file):
"""
Parses a label-file and returns a dictionary with the residue label mappings. Unless prepended with a comment-
indicator (#), each line is assumed to have a valid residue identifier (e.g. "A:ALA:1") and a label which the
residue should be mapped to (e.g. "A1").
Example:
parse_labelfile(["A:ALA:1\tA1")
# Returns {"A:ALA:1": "A1"}
Parameters
----------
label_file: Iterable[str]
Lines with tab-separated residue identifier and label
Returns
-------
dict of str: str
Mapping from residue-id in contact-file to label of any format
"""
ret = {}
for line in label_file:
line = line.strip()
# Ignore line if empty or comment
if line[0] == "#" or len(line) == 0:
continue
tokens = line.split("\t")
ret[tokens[0]] = tokens[1]
return ret
def gen_frequencies(count_list):
"""
Take a list of residue contact counts (see output of `gen_counts`) and compute total counts and frequencies.
Example:
clist = [
(4, {("A1", "R4"): 4, ("A1", "C5"): 3}), # First simulation has 4 frames and two contacts
(3, {("A1", "R4"): 2}) # Second simulation has 3 frames and one contact
]
gen_frequencies(clist)
# Returns: (7, {("A1", "R4"): (6, 0.857), ("A1", "C5"): (3, 0.429)})
Parameters
----------
count_list: list of (int, dict of (str, str): int)
List with individual frame counts and dictionaries mapping residue pairs to frame-counts
Return
------
(int, dict of (str, str): (int, float))
Total framecount and mapping of residue ID pairs to the number of frames in which they contact and the frequency
"""
rescontact_count = defaultdict(int)
total_frames = 0
for frames, rescount_dict in count_list:
total_frames += frames
for (res1, res2), count in rescount_dict.items():
rescontact_count[(res1, res2)] += count
respair_freqs = {respair: (count, float(count) / total_frames) for respair, count in rescontact_count.items()}
return total_frames, respair_freqs
def main():
# Parse command line arguments
class MyParser(argparse.ArgumentParser):
def error(self, message):
# Prints full program help when error occurs
self.print_help(sys.stderr)
sys.stderr.write('\nError: %s\n' % message)
sys.exit(2)
parser = MyParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--input_files',
type=argparse.FileType('r'),
required=True,
nargs='+',
metavar='FILE.tsv',
help="Path to one or more contact-file outputs")
parser.add_argument('--label_file',
type=argparse.FileType('r'),
required=False,
metavar='FILE.tsv',
help="A label file for standardizing residue names between different proteins")
parser.add_argument('--output_file',
type=argparse.FileType('w'),
required=True,
metavar='FILE.tsv',
help="Path to output file")
parser.add_argument('--itypes',
required=False,
default="all",
type=str,
nargs="+",
metavar="ITYPE",
help='Include only these interaction types in frequency computation. Valid choices are: \n'
'* all (default), \n'
'* sb (salt-bridges), \n'
'* pc (pi-cation), \n'
'* ps (pi-stacking), \n'
'* ts (t-stacking), \n'
'* vdw (van der Waals), \n'
'* hbbb, hbsb, hbss, (hydrogen bonds with specific backbone/side-chain profile)\n'
'* wb, wb2 (water-bridges and extended water-bridges) \n'
'* hls, hlb (ligand-sidechain and ligand-backbone hydrogen bonds), \n'
'* lwb, lwb2 (ligand water-bridges and extended water-bridges)')
# results, unknown = parser.parse_known_args()
args = parser.parse_args()
# Update itypes if "all" is specified
if "all" in args.itypes:
args.itypes = ["sb", "pc", "ps", "ts", "vdw", "hb", "lhb", "hbbb", "hbsb",
"hbss", "wb", "wb2", "hls", "hlb", "lwb", "lwb2"]
output_file = args.output_file
input_files = args.input_files
itypes = args.itypes
labels = parse_labelfile(args.label_file) if args.label_file else None
counts = [gen_counts(input_file, itypes, labels) for input_file in input_files]
total_frames, frequencies = gen_frequencies(counts)
output_file.write('#\ttotal_frames:%d\tinteraction_types:%s\n' % (total_frames, ','.join(itypes)))
    output_file.write('#\tColumns:\tresidue_1,\tresidue_2,\tcontact_frequency\n')
for (res1, res2), (count, frequency) in frequencies.items():
output_file.write('\t'.join([res1, res2, "%.3f" % frequency]) + "\n")
if __name__ == '__main__':
main()
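# Example invocation (the script name and file paths are placeholders; the flags
# are the ones defined by the argument parser above):
#   python contact_frequencies.py --input_files run1_contacts.tsv run2_contacts.tsv \
#       --itypes hbbb hbsb hbss --output_file frequencies.tsv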
|
the-stack_0_8451 | """
Makes a chromosome or plasmid item
Example mouse chromosome 5
https://www.wikidata.org/wiki/Q15304656
Example yeast chromosome XII
https://www.wikidata.org/wiki/Q27525657
"""
import os
from datetime import datetime
from io import StringIO
from urllib import request
import pandas as pd
from scheduled_bots import get_default_core_props
from wikidataintegrator import wdi_core, wdi_helpers
core_props = get_default_core_props()
class ChromosomeBot:
chr_type_map = {'Chromosome': 'Q37748',
'Mitochondrion': 'Q18694495',
'Chloroplast': 'Q22329079'}
def __init__(self):
self.retrieved = None
self.login = None
self.ass_sum = None
self.chr_df = dict()
def get_assembly_summaries(self):
table = request.urlopen(request.Request('ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/assembly_summary_refseq.txt')).read().decode()
names = table.split("\n")[1].replace("# ", "").split("\t")
self.ass_sum = pd.read_csv(StringIO(table), sep="\t", comment="#", names=names, low_memory=False)
def get_assembly_report(self, taxid):
if self.ass_sum is None:
self.get_assembly_summaries()
df = self.ass_sum.query("taxid == {} & refseq_category == 'reference genome'".format(taxid))
if len(df) == 0:
# try "representative genome" (needed for mouse and rat)
df = self.ass_sum.query("taxid == {} & refseq_category == 'representative genome'".format(taxid))
if len(df) != 1:
raise ValueError("unknown reference: {}".format(df))
print(df)
ftp_path = list(df.ftp_path)[0]
assembly = os.path.split(ftp_path)[1]
url = os.path.join(ftp_path, assembly + "_assembly_report.txt")
print(url)
# read the column names from the file
table = request.urlopen(request.Request(url)).read().decode()
names = [x for x in table.split("\n") if x.startswith("#")][-1].strip().replace("# ", "").split("\t")
self.chr_df[taxid] = pd.read_csv(StringIO(table), sep="\t", names=names, comment='#')
self.chr_df[taxid] = self.chr_df[taxid].rename(columns={'Sequence-Name': 'SequenceName', 'Sequence-Role': 'SequenceRole',
'Assigned-Molecule': 'AssignedMolecule',
'Assigned-Molecule-Location/Type': 'AssignedMoleculeLocationType',
'GenBank-Accn': 'GenBankAccn', 'RefSeq-Accn': 'RefSeqAccn',
'UCSC-style-name': 'UCSCstylename'})
#print(self.chr_df[taxid].query("SequenceRole == 'assembled-molecule'"))
def get_chrom_info(self, chr_name, taxid):
""" result looks like:
{'Assembly-Unit': 'C57BL/6J',
'Assigned-Molecule': '1',
'Assigned-Molecule-Location/Type': 'Chromosome',
'GenBank-Accn': 'CM000994.2',
'RefSeq-Accn': 'NC_000067.6',
'Relationship': '=',
'Sequence-Length': 195471971,
'Sequence-Name': '1',
'Sequence-Role': 'assembled-molecule',
'UCSC-style-name': 'chr1'}
"""
if taxid not in self.chr_df:
self.get_assembly_report(taxid)
df = self.chr_df[taxid].query("SequenceRole == 'assembled-molecule'")
d_list = df[(df.SequenceName == chr_name) | (df.AssignedMolecule == chr_name) | (df.UCSCstylename == chr_name)].to_dict('records')
if len(d_list) == 1:
return d_list[0]
def get_or_create(self, organism_info, retrieved=None, login=None):
"""
Make sure all chromosome items exist
return a map of chr num to wdid. looks like:
{'1': 'Q28114580', '2': 'Q28114581', ..., 'MT': 'Q28114585'}
:param organism_info: {'name': name, 'taxid': taxid, 'wdid': wdid, 'type': type}
:type organism_info: dict
:param retrieved: for reference statement
:type retrieved: datetime
:param login:
:return:
"""
self.login = login
self.retrieved = datetime.now() if retrieved is None else retrieved
taxid = int(organism_info['taxid'])
if taxid not in self.chr_df:
self.get_assembly_report(taxid)
# map of chr_num to wdid for this taxon ("1" -> "Q1234")
chr_num_wdid = dict()
# get assembled chromosomes, which we will create items for
chrdf = self.chr_df[taxid][self.chr_df[taxid]['SequenceRole'] == 'assembled-molecule']
existing_chr = wdi_helpers.id_mapper("P2249")
existing_chr = {k.split(".")[0]: v for k, v in existing_chr.items()}
for record in chrdf.to_dict("records"):
chrom_num = record['SequenceName']
# if a field has "chr" in it, remove it
chrom_num = chrom_num.replace("chr", "").replace("Chr", "").replace("CHR", "")
genome_id = record['RefSeqAccn']
genome_id = genome_id.split(".")[0]
chr_type = record['AssignedMoleculeLocationType']
# {'Chromosome','Mitochondrion'}
# chrom_type = record['Assigned-Molecule-Location/Type']
if genome_id in existing_chr:
chr_num_wdid[chrom_num] = existing_chr[genome_id]
else:
# chromosome doesn't exist in wikidata. create it
print("chromosome being created: {}, {}".format(chrom_num, genome_id))
chr_num_wdid[chrom_num] = self.create_chrom(organism_info, chrom_num, genome_id, chr_type, login)
return chr_num_wdid
def create_chrom(self, organism_info, chrom_num, genome_id, chr_type, login):
def make_ref(retrieved, genome_id):
"""
Create reference statement for chromosomes
:param retrieved: datetime
:type retrieved: datetime
:param genome_id: refseq genome id
:type genome_id: str
:return:
"""
refs = [
wdi_core.WDItemID(value='Q20641742', prop_nr='P248', is_reference=True), # stated in ncbi gene
wdi_core.WDString(value=genome_id, prop_nr='P2249', is_reference=True), # Link to Refseq Genome ID
wdi_core.WDTime(retrieved.strftime('+%Y-%m-%dT00:00:00Z'), prop_nr='P813', is_reference=True)
]
return refs
item_name = '{} chromosome {}'.format(organism_info['name'], chrom_num)
item_description = '{} chromosome'.format(organism_info['type']) if organism_info['type'] else "chromosome"
print(genome_id)
reference = make_ref(self.retrieved, genome_id)
# instance of chr_type
if chr_type not in ChromosomeBot.chr_type_map:
raise ValueError("unknown chromosome type: {}".format(chr_type))
statements = [wdi_core.WDItemID(value=ChromosomeBot.chr_type_map[chr_type], prop_nr='P31', references=[reference])]
# found in taxon
statements.append(wdi_core.WDItemID(value=organism_info['wdid'], prop_nr='P703', references=[reference]))
# genome id
statements.append(wdi_core.WDString(value=genome_id, prop_nr='P2249', references=[reference]))
wd_item = wdi_core.WDItemEngine(data=statements,
append_value=['P31'], fast_run=True,
fast_run_base_filter={'P703': organism_info['wdid'], 'P2249': ''},
core_props=core_props)
if wd_item.wd_item_id:
return wd_item.wd_item_id
wd_item.set_label(item_name)
wd_item.set_description(item_description, lang='en')
wdi_helpers.try_write(wd_item, genome_id, 'P2249', login)
return wd_item.wd_item_id
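if __name__ == "__main__":
    # Illustrative only: downloads the reference assembly report for mouse
    # (taxid 10090) from NCBI and prints the record for chromosome 1. Requires
    # network access; no Wikidata login is needed for this read-only call.
    bot = ChromosomeBot()
    print(bot.get_chrom_info("1", taxid=10090))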
|
the-stack_0_8452 | import math
# ZTest
def startZTest(populationAverage, sampleAverage, populationStrdDeviation, sampleSize):
standardError = populationStrdDeviation / math.sqrt(sampleSize)
observedValue = (sampleAverage - populationAverage) / standardError
print("ZTest: " + str(observedValue))
return observedValue
# Euclidean Distance
def startED(p, q, length):
sum = 0.0
for i in range(0, length):
sum = math.pow(p[i] - q[i], 2) + sum
euclideanDistance = math.sqrt(sum)
print("ED: " + str(euclideanDistance))
return euclideanDistance
# Piecewise Aggregate Approximation
def startPAA(pieceCount, dataList):
count = 0
remainderCount = 1
sum = 0.0
i = 0
    interval = len(dataList) // pieceCount  # integer chunk length; the uneven remainder is handled below
remainder = len(dataList) % pieceCount
paaList = [0 for _ in range(pieceCount)]
for data in dataList:
sum = sum + float(data)
count = count + 1
if remainderCount <= remainder:
if count == (interval + 1):
                average = sum / (interval + 1)  # this chunk absorbed one remainder element, so it holds interval + 1 values
paaList[i] = average
remainderCount = remainderCount + 1
i = i + 1
sum = 0.0
count = 0
else:
if count == interval:
average = sum / interval
paaList[i] = average
i = i + 1
sum = 0.0
count = 0
return paaList
# Jensen Shannon Divergence
def startJSD(p, q):
middle = [0 for _ in range(len(p))]
for i in range(0, len(p)):
middle[i] = (p[i] + q[i]) / 2
divergence = (startKLD(p, middle) + startKLD(q, middle)) / 2
print("JSD: " + str(divergence))
return divergence
# Kullback Leibler Divergence
def startKLD(p, q):
divergence = 0.0
for i in range(0, len(p)):
tmp = 0.0
if p[i] != 0.0:
tmp = p[i] * (math.log10(p[i]) - math.log10(q[i]))
divergence = divergence + tmp
return divergence
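if __name__ == "__main__":
    # Minimal illustration, not part of the original module; the numbers are
    # arbitrary sample values chosen only to exercise each function.
    startZTest(populationAverage=100.0, sampleAverage=103.0,
               populationStrdDeviation=15.0, sampleSize=36)
    startED([1.0, 2.0, 3.0], [4.0, 6.0, 3.0], 3)
    print("PAA: " + str(startPAA(4, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])))
    # startJSD expects two discrete probability distributions of equal length.
    startJSD([0.1, 0.4, 0.5], [0.2, 0.3, 0.5])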
|
the-stack_0_8453 | # coding: utf8
from __future__ import unicode_literals
SPACY_MODELS = {}
VECTORS = {}
def get_spacy(lang, **kwargs):
global SPACY_MODELS
import spacy
if lang not in SPACY_MODELS:
SPACY_MODELS[lang] = spacy.load(lang, **kwargs)
return SPACY_MODELS[lang]
def register_vectors(ops, lang, data):
key = (ops.device, lang)
VECTORS[key] = data
def get_vectors(ops, lang):
global VECTORS
key = (ops.device, lang)
if key not in VECTORS:
nlp = get_spacy(lang)
VECTORS[key] = nlp.vocab.vectors.data
return VECTORS[key]
|
the-stack_0_8455 | #!/usr/bin/python3
# Adapted from https://github.com/openai/mujoco-py/blob/master/vendor/Xdummy-entrypoint
# Copyright OpenAI; MIT License
import argparse
import os
import sys
import subprocess
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args, extra_args = parser.parse_known_args()
subprocess.Popen(
[
"nohup",
"Xorg",
"-noreset",
"+extension",
"GLX",
"+extension",
"RANDR",
"+extension",
"RENDER",
"-logfile",
"/tmp/xdummy.log",
"-config",
"/etc/dummy_xorg.conf",
":0",
]
)
subprocess.Popen(
["nohup", "Xdummy"],
stdout=open("/dev/null", "w"),
stderr=open("/dev/null", "w"),
)
os.environ["DISPLAY"] = ":0"
if not extra_args:
argv = ["/bin/bash"]
else:
argv = extra_args
# Explicitly flush right before the exec since otherwise things might get
# lost in Python's buffers around stdout/stderr (!).
sys.stdout.flush()
sys.stderr.flush()
os.execvpe(argv[0], argv, os.environ)
|
the-stack_0_8458 | from panda3d.core import *
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
from pirates.util.PythonUtil import reduceAngle, fitSrcAngle2Dest
from pirates.util.PythonUtilPOD import clampScalar, getSetter, ParamObj
from direct.task import Task
from otp.otpbase import OTPGlobals
from pirates.pirate import CameraMode
from pirates.piratesbase import PiratesGlobals
import math
class OrbitCamera(CameraMode.CameraMode, NodePath, ParamObj):
notify = DirectNotifyGlobal.directNotify.newCategory('OrbitCamera')
class ParamSet(ParamObj.ParamSet):
Params = {
'lookAtOffset': Vec3(0, 0, 0),
'escapement': 10.0,
'rotation': 0.0,
'fadeGeom': False,
'idealDistance': 25.0,
'minDistance': 3.0,
'maxDistance': 40.0,
'minEsc': -20.0,
'maxEsc': 25.0,
'minDomeEsc': 0.0,
'maxCamtiltEsc': 0.0,
'autoFaceForward': True,
'autoFaceForwardMaxDur': 14.0 }
UpdateTaskName = 'OrbitCamUpdateTask'
CollisionCheckTaskName = 'OrbitCamCollisionTask'
GeomFadeLerpDur = 1.0
PullFwdDist = 2.0
def __init__(self, subject, params = None):
ParamObj.__init__(self)
NodePath.__init__(self, self._getTopNodeName())
CameraMode.CameraMode.__init__(self)
self.setSubject(subject)
self.lookAtNode = NodePath('orbitCamLookAt')
self.escapementNode = self.attachNewNode('orbitCamEscapement')
self.camParent = self.escapementNode.attachNewNode('orbitCamParent')
self._paramStack = []
if params is None:
self.setDefaultParams()
else:
params.applyTo(self)
self._isAtRear = True
self._rotateToRearIval = None
self._lockAtRear = False
def destroy(self):
self._paramStack = None
        self.escapementNode = None
self.camParent = None
self.lookAtNode.removeNode()
del self.subject
CameraMode.CameraMode.destroy(self)
NodePath.removeNode(self)
ParamObj.destroy(self)
def getName(self):
return 'Orbit'
def _getTopNodeName(self):
return 'OrbitCam'
def setSubject(self, subject = None):
self.subject = subject
def getSubject(self):
return self.subject
def pushParams(self):
self._paramStack.append(self.ParamSet(self))
def popParams(self):
curParams = self.ParamSet(self)
if len(self._paramStack):
self._paramStack.pop().applyTo(self)
else:
OrbitCamera.notify.warning('param stack underflow')
return curParams
def getLookAtOffset(self):
return self.lookAtOffset
def setLookAtOffset(self, lookAtOffset):
self.lookAtOffset = Vec3(lookAtOffset)
def applyLookAtOffset(self):
if self.isActive():
self.lookAtNode.setPos(self.lookAtOffset)
self.setFluidPos(render, self.lookAtNode.getPos(render))
camera.lookAt(self.lookAtNode)
def getEscapement(self):
return self.escapement
def setEscapement(self, escapement):
self.escapement = escapement
def applyEscapement(self):
if self.isActive():
if self.escapement >= self._minDomeEsc:
domeEsc = self.escapement
camEsc = 0.0
elif self.escapement <= self._maxCamtiltEsc:
domeEsc = self._minDomeEsc
camEsc = self._maxCamtiltEsc - self.escapement
else:
domeEsc = self._minDomeEsc
camEsc = 0.0
self.escapementNode.setP(-domeEsc)
self.camParent.setP(camEsc)
def _lerpEscapement(self, escapement, duration = None):
curEsc = self.getEscapement()
escapement = clampScalar(escapement, self._minEsc, self._maxEsc)
if duration is None:
diff = abs(curEsc - escapement)
            speed = (max(curEsc, self._maxEsc) - min(curEsc, self._minEsc)) * 0.025
duration = diff / speed
self._stopEscapementLerp()
self._escLerpIval = LerpFunctionInterval(self.setEscapement, fromData = curEsc, toData = escapement, duration = duration, blendType = 'easeOut', name = 'OrbitCamera.escapementLerp')
self._escLerpIval.start()
def _stopEscapementLerp(self):
if self._escLerpIval is not None and self._escLerpIval.isPlaying():
self._escLerpIval.pause()
self._escLerpIval = None
def getRotation(self):
return self.getH(self.subject)
def setRotation(self, rotation):
self._rotation = rotation
if self.subject:
self.setH(self.subject, rotation)
def getFadeGeom(self):
return self._fadeGeom
def setFadeGeom(self, fadeGeom):
self._fadeGeom = fadeGeom
def applyFadeGeom(self):
if self.isActive():
if not (self._fadeGeom) and self.getPriorValue():
if hasattr(self, '_hiddenGeoms'):
for np in self._hiddenGeoms.keys():
self._unfadeGeom(np)
self._hiddenGeoms = { }
def getIdealDistance(self):
return self.idealDistance
def setIdealDistance(self, idealDistance):
self.idealDistance = idealDistance
def applyIdealDistance(self):
if self.isActive():
self.idealDistance = clampScalar(self.idealDistance, self._minDistance, self._maxDistance)
if self._practicalDistance is None:
self._zoomToDistance(self.idealDistance)
def popToIdealDistance(self):
self._setCurDistance(self.idealDistance)
def setPracticalDistance(self, practicalDistance):
if practicalDistance is not None and practicalDistance > self.idealDistance:
practicalDistance = None
if self._practicalDistance is None:
if practicalDistance is None:
return None
self._stopZoomIval()
self._setCurDistance(practicalDistance)
else:
self._stopZoomIval()
if practicalDistance is None:
self._zoomToDistance(self.idealDistance)
else:
self._setCurDistance(practicalDistance)
self._practicalDistance = practicalDistance
def getMinDistance(self):
return self._minDistance
def setMinDistance(self, minDistance):
self._minDistance = minDistance
def applyMinDistance(self):
if self.isActive():
self.setIdealDistance(self.idealDistance)
def getMaxDistance(self):
return self._maxDistance
def setMaxDistance(self, maxDistance):
self._maxDistance = maxDistance
def applyMaxDistance(self):
if self.isActive():
self.setIdealDistance(self.idealDistance)
if hasattr(self, '_collSolid'):
self._collSolid.setPointB(0, -(self._maxDistance + OrbitCamera.PullFwdDist), 0)
def getMinEsc(self):
return self._minEsc
def getMaxEsc(self):
return self._maxEsc
def getMinDomeEsc(self):
return self._minDomeEsc
def getMaxCamtiltEsc(self):
return self._maxCamtiltEsc
def setMinEsc(self, minEsc):
self._minEsc = minEsc
def setMaxEsc(self, maxEsc):
self._maxEsc = maxEsc
def setMinDomeEsc(self, minDomeEsc):
self._minDomeEsc = minDomeEsc
def setMaxCamtiltEsc(self, maxCamtiltEsc):
self._maxCamtiltEsc = maxCamtiltEsc
def enterActive(self):
CameraMode.CameraMode.enterActive(self)
self.reparentTo(render)
self.clearTransform()
self.setH(self.subject, self._rotation)
self.setP(0)
self.setR(0)
self.camParent.clearTransform()
camera.reparentTo(self.camParent)
camera.clearTransform()
base.camNode.setLodCenter(self.subject)
if base.wantEnviroDR:
base.enviroCamNode.setLodCenter(self.subject)
self.lookAtNode.reparentTo(self.subject)
self.lookAtNode.clearTransform()
self.lookAtNode.setPos(self.lookAtOffset)
self.setFluidPos(render, self.lookAtNode.getPos(render))
self.escapementNode.setP(-(self.escapement))
self._setCurDistance(self.idealDistance)
camera.lookAt(self.lookAtNode)
self._disableRotateToRear()
self._isAtRear = True
self._rotateToRearIval = None
self._lockAtRear = False
self._zoomIval = None
self._escLerpIval = None
self._practicalDistance = None
self._startUpdateTask()
self._startCollisionCheck()
def exitActive(self):
taskMgr.remove(OrbitCamera.UpdateTaskName)
self.ignoreAll()
self._stopZoomIval()
self._stopEscapementLerp()
self._stopRotateToRearIval()
self._stopCollisionCheck()
self._stopUpdateTask()
self.lookAtNode.detachNode()
self.detachNode()
base.camNode.setLodCenter(NodePath())
if base.wantEnviroDR:
base.enviroCamNode.setLodCenter(NodePath())
CameraMode.CameraMode.exitActive(self)
def _startUpdateTask(self):
self.lastSubjectH = self.subject.getH(render)
taskMgr.add(self._updateTask, OrbitCamera.UpdateTaskName, priority = 40)
self._updateTask()
def _updateTask(self, task = None):
self.setFluidPos(render, self.lookAtNode.getPos(render))
curSubjectH = self.subject.getH(render)
if self._lockAtRear:
self.setRotation(0.0)
elif self._rotateToRearEnabled and self.getAutoFaceForward():
relH = reduceAngle(self.getH(self.subject))
absRelH = abs(relH)
            if absRelH < 0.1:
self.setRotation(0.0)
self._stopRotateToRearIval()
self._lockAtRear = True
else:
ivalPlaying = self._rotateToRearIvalIsPlaying()
if ivalPlaying and curSubjectH == self.lastSubjectH:
pass
else:
self._stopRotateToRearIval()
duration = self._autoFaceForwardMaxDur * absRelH / 180.0
targetH = curSubjectH
startH = fitSrcAngle2Dest(self.getH(render), targetH)
self._rotateToRearIval = LerpHprInterval(self, duration, Point3(targetH, 0, 0), startHpr = Point3(startH, 0, 0), other = render, blendType = 'easeOut')
self._rotateToRearIval.start()
self.lastSubjectH = curSubjectH
self.setP(0)
self.setR(0)
camera.clearMat()
return Task.cont
def _stopUpdateTask(self):
taskMgr.remove(OrbitCamera.UpdateTaskName)
def setAutoFaceForward(self, autoFaceForward):
if not autoFaceForward:
self._stopRotateToRearIval()
self._autoFaceForward = autoFaceForward
def getAutoFaceForward(self):
return self._autoFaceForward
def setAutoFaceForwardMaxDur(self, autoFaceForwardMaxDur):
self._autoFaceForwardMaxDur = autoFaceForwardMaxDur
def getAutoFaceForwardMaxDur(self):
return self._autoFaceForwardMaxDur
def _enableRotateToRear(self):
self._rotateToRearEnabled = True
def _disableRotateToRear(self):
self._stopRotateToRearIval()
self._rotateToRearEnabled = False
def _rotateToRearIvalIsPlaying(self):
        return self._rotateToRearIval is not None and self._rotateToRearIval.isPlaying()
def _stopRotateToRearIval(self):
if self._rotateToRearIval is not None and self._rotateToRearIval.isPlaying():
self._rotateToRearIval.pause()
self._rotateToRearIval = None
def _getCurDistance(self):
return -self.camParent.getY()
def _setCurDistance(self, distance):
self.camParent.setY(-distance)
def _zoomToDistance(self, distance):
curDistance = self._getCurDistance()
diff = abs(curDistance - distance)
if diff < 0.01:
self._setCurDistance(distance)
return None
speed = (max(curDistance, self._maxDistance) - min(curDistance, self._minDistance)) * 0.5
duration = diff / speed
self._stopZoomIval()
self._zoomIval = LerpPosInterval(self.camParent, duration, Point3(0, -distance, 0), blendType = 'easeOut', name = 'orbitCamZoom', fluid = 1)
self._zoomIval.start()
def _stopZoomIval(self):
if self._zoomIval is not None and self._zoomIval.isPlaying():
self._zoomIval.pause()
self._zoomIval = None
def _startCollisionCheck(self, shipBarrier = 0):
self._collSolid = CollisionSegment(0, 0, 0, 0, -(self._maxDistance + OrbitCamera.PullFwdDist), 0)
collSolidNode = CollisionNode('OrbitCam.CollSolid')
collSolidNode.addSolid(self._collSolid)
if shipBarrier:
collSolidNode.setFromCollideMask(PiratesGlobals.ShipCameraBarrierBitmask)
else:
collSolidNode.setFromCollideMask(OTPGlobals.CameraBitmask | OTPGlobals.CameraTransparentBitmask | OTPGlobals.FloorBitmask)
collSolidNode.setIntoCollideMask(BitMask32.allOff())
self._collSolidNp = self.escapementNode.attachNewNode(collSolidNode)
self._cHandlerQueue = CollisionHandlerQueue()
self._cTrav = CollisionTraverser('OrbitCam.cTrav')
self._cTrav.addCollider(self._collSolidNp, self._cHandlerQueue)
self._hiddenGeoms = { }
self._fadeOutIvals = { }
self._fadeInIvals = { }
taskMgr.add(self._collisionCheckTask, OrbitCamera.CollisionCheckTaskName, priority = 45)
def _collisionCheckTask(self, task = None):
self._cTrav.traverse(render)
self.cTravOnFloor.traverse(render)
if self._fadeGeom:
nonObstrGeoms = dict(self._hiddenGeoms)
numEntries = self._cHandlerQueue.getNumEntries()
if numEntries > 0:
self._cHandlerQueue.sortEntries()
i = 0
while i < numEntries:
collEntry = self._cHandlerQueue.getEntry(i)
intoNode = collEntry.getIntoNodePath()
cMask = intoNode.node().getIntoCollideMask()
if not (cMask & OTPGlobals.CameraTransparentBitmask).isZero():
if intoNode in nonObstrGeoms:
del nonObstrGeoms[intoNode]
self._fadeGeom(intoNode)
else:
cPoint = collEntry.getSurfacePoint(self.escapementNode)
distance = Vec3(cPoint).length()
self.setPracticalDistance(distance - OrbitCamera.PullFwdDist)
break
i += 1
else:
self.setPracticalDistance(None)
for np in nonObstrGeoms.keys():
self._unfadeGeom(np)
elif self._cHandlerQueue.getNumEntries() > 0:
self._cHandlerQueue.sortEntries()
collEntry = self._cHandlerQueue.getEntry(0)
cPoint = collEntry.getSurfacePoint(self.escapementNode)
distance = Vec3(cPoint).length()
self.setPracticalDistance(distance - OrbitCamera.PullFwdDist)
else:
self.setPracticalDistance(None)
distance = self._getCurDistance()
return Task.cont
def _stopCollisionCheck(self):
while len(self._hiddenGeoms):
self._unfadeGeom(self._hiddenGeoms.keys()[0])
del self._hiddenGeoms
del self._fadeOutIvals
del self._fadeInIvals
taskMgr.remove(OrbitCamera.CollisionCheckTaskName)
self._cTrav.removeCollider(self._collSolidNp)
del self._cHandlerQueue
del self._cTrav
self._collSolidNp.detachNode()
del self._collSolidNp
def _fadeGeom(self, np):
if np in self._fadeInIvals:
self._fadeInIvals[np].finish()
del self._fadeInIvals[np]
if np not in self._hiddenGeoms:
hadTransparency = np.getTransparency()
fadeIval = Sequence(Func(np.setTransparency, 1), LerpColorScaleInterval(np, OrbitCamera.GeomFadeLerpDur, VBase4(1, 1, 1, 0), blendType = 'easeInOut'), name = 'OrbitCamFadeGeomOut')
self._hiddenGeoms[np] = hadTransparency
self._fadeOutIvals[np] = fadeIval
fadeIval.start()
def _unfadeGeom(self, np):
if np in self._hiddenGeoms:
if np in self._fadeOutIvals:
self._fadeOutIvals[np].pause()
del self._fadeOutIvals[np]
fadeIval = Sequence(LerpColorScaleInterval(np, OrbitCamera.GeomFadeLerpDur, VBase4(1, 1, 1, 1), blendType = 'easeInOut'), Func(np.setTransparency, self._hiddenGeoms[np]), name = 'OrbitCamFadeGeomIn')
del self._hiddenGeoms[np]
self._fadeInIvals[np] = fadeIval
fadeIval.start()
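# Hedged usage sketch (not part of the original module).  OrbitCamera only works
# inside a running Panda3D/Pirates session (render, camera, base and taskMgr must
# exist), so this is illustration only; `avatar` stands in for whatever NodePath
# the camera should orbit.
#
#     cam = OrbitCamera(avatar)
#     cam.setIdealDistance(15.0)        # zoom target, clamped to min/max distance
#     cam.setEscapement(20.0)           # pitch of the orbit dome
#     cam.setFadeGeom(True)             # fade geometry that blocks the view
#     cam.pushParams()                  # save current params before a temporary tweak
#     cam.setLookAtOffset(Vec3(0, 0, 3))
#     cam.popParams()                   # restore the saved params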
|
the-stack_0_8459 | # coding: utf-8
# Copyright 2018 Hiroshi Seki
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import numpy
import pytest
import torch
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.nets.pytorch_backend import e2e_asr
def make_arg(**kwargs):
defaults = dict(
elayers=4,
subsample="1_2_2_1_1",
etype="blstmp",
eunits=100,
eprojs=100,
dtype="lstm",
dlayers=1,
dunits=300,
atype="location",
aconv_chans=10,
aconv_filts=100,
mtlalpha=0.5,
lsm_type="",
lsm_weight=0.0,
sampling_probability=0.0,
adim=320,
dropout_rate=0.0,
dropout_rate_decoder=0.0,
nbest=5,
beam_size=3,
penalty=0.5,
maxlenratio=1.0,
minlenratio=0.0,
ctc_weight=0.2,
ctc_window_margin=0,
verbose=2,
char_list=["a", "i", "u", "e", "o"],
word_list=["<blank>", "<unk>", "ai", "iu", "ue", "eo", "oa", "<eos>"],
outdir=None,
ctc_type="warpctc",
report_cer=False,
report_wer=False,
sym_space="<space>",
sym_blank="<blank>",
context_residual=False,
use_frontend=False,
replace_sos=False,
tgt_lang=False,
)
defaults.update(kwargs)
return argparse.Namespace(**defaults)
def init_torch_weight_const(m, val):
for p in m.parameters():
p.data.fill_(val)
def init_torch_weight_random(m, rand_range):
for name, p in m.named_parameters():
p.data.uniform_(rand_range[0], rand_range[1])
# set small bias for <blank> output
if "wordlm.lo.bias" in name or "dec.output.bias" in name:
p.data[0] = -10.0
def init_chainer_weight_const(m, val):
for p in m.params():
p.data[:] = val
def make_small_arg(**kwargs):
return make_arg(
elayers=1,
subsample="1_1",
etype="lstm",
eunits=2,
eprojs=2,
dtype="lstm",
dlayers=1,
dunits=2,
atype="dot",
adim=2,
rnnlm="dummy",
lm_weight=0.3,
**kwargs
)
# ctc_weight: 0.0 (attention), 0.5 (hybrid CTC/attention), 1.0 (CTC)
@pytest.mark.parametrize("ctc_weight", [0.0, 0.5, 1.0])
def test_batch_beam_search(ctc_weight):
numpy.random.seed(1)
idim = 10
args = make_small_arg(ctc_weight=ctc_weight)
model = e2e_asr.E2E(idim, 5, args)
torch.manual_seed(1)
rnnlm = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(len(args.char_list), 2, 2))
init_torch_weight_random(model, (-0.1, 0.1))
init_torch_weight_random(rnnlm, (-0.1, 0.1))
model.eval()
rnnlm.eval()
data = [("aaa", dict(feat=numpy.random.randn(10, idim).astype(numpy.float32)))]
in_data = data[0][1]["feat"]
s_nbest_hyps = model.recognize(in_data, args, args.char_list)
b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list)
assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
s_nbest_hyps = model.recognize(in_data, args, args.char_list, rnnlm)
b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list, rnnlm)
assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
if ctc_weight > 0.0:
args.ctc_window_margin = 10
s_nbest_hyps = model.recognize(in_data, args, args.char_list, rnnlm)
b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list, rnnlm)
assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
# Test word LM in batch decoding
rand_range = (-0.01, 0.01)
torch.manual_seed(1)
char_list = ["<blank>", "<space>"] + args.char_list + ["<eos>"]
args = make_small_arg(
ctc_weight=ctc_weight,
ctc_window_margin=10,
beam_size=5,
)
model = e2e_asr.E2E(idim, len(char_list), args)
char_dict = {x: i for i, x in enumerate(char_list)}
word_dict = {x: i for i, x in enumerate(args.word_list)}
word_rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(len(args.word_list), 2, 2)
)
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.LookAheadWordLM(word_rnnlm.predictor, word_dict, char_dict)
)
init_torch_weight_random(model, rand_range)
init_torch_weight_random(rnnlm, rand_range)
model.eval()
rnnlm.eval()
s_nbest_hyps = model.recognize(in_data, args, char_list, rnnlm)
b_nbest_hyps = model.recognize_batch([in_data], args, char_list, rnnlm)
assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
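# Hedged usage note (not part of the original test module): the file name below
# is a placeholder for wherever this module lives in the tree; the test is
# normally driven by pytest and parametrised over the three ctc_weight values:
#
#     pytest test_batch_beam_search.py -k test_batch_beam_search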
|
the-stack_0_8460 | import os
import sys
seed_data = 7
lunarc = int(sys.argv[1])
nbr_params = int(sys.argv[2])
data_set = str(sys.argv[3])
seed = int(sys.argv[4])
# remove disp setting
if lunarc == 1 and 'DISPLAY' in os.environ:
del os.environ['DISPLAY']
if lunarc == 1:
os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev/hodgkin_huxley')
else:
os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev/hodgkin_huxley')
import torch
import HodgkinHuxley
import numpy as np
import functions as func
import time
import sys
if lunarc == 1:
sys.path.append('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev/algorithms')
else:
sys.path.append(
'/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev/algorithms')
import snpla as snpla
nbr_samples = int(len(HodgkinHuxley.h.t_vec) * HodgkinHuxley.h.dt)
job = str(data_set) + "_" + str(nbr_params) + "_" + str(nbr_samples) + "_" + str(seed) # + "extended"
# Gen sbi data
model = HodgkinHuxley.HodgkinHuxley(data_set, nbr_params, "snpla")
v_true, Iinj = model.simulator(model.log_theta_true, seed_data, True)
summary_stats_obs = model.calculate_summary_statistics(v_true)
# set up model simulator
def simulator_wrapper(params):
# return tensor
return model.calculate_summary_statistics(model.simulator(params, None))
# run pilot to calc mean and std of summary stats
whiteness_params = func.pilot_run(model, simulator_wrapper, summary_stats_obs)
summary_stats_obs_w = func.whiten(summary_stats_obs, whiteness_params)
def simulator(params):
N = params.shape[0]
data = torch.zeros(params.shape[0], 19)
for i in range(N):
data[i, :] = torch.as_tensor(func.whiten(simulator_wrapper(params[i, :]), whiteness_params))
return data
flow_lik, flow_post = func.set_up_networks(model.prior.low,
model.prior.high,
dim_post=model.nbr_params)
# setting for not exteded:
# decay_rate_post = 0.95
# prob_prior_decay_rate = 0.9
# 1000, 10000
# setting for exteded:
# decay_rate_post = 0.9
# prob_prior_decay_rate = 0.9
# 2000, 10000
optimizer_lik = torch.optim.Adam(flow_lik.parameters())
optimizer_post = torch.optim.Adam(flow_post.parameters(), lr=0.001, weight_decay=0.0) # used def value before
decay_rate_post = 0.95 # was 0.95
s_x_o = torch.from_numpy(summary_stats_obs_w).to(dtype=torch.float32).reshape(1, 19)
nbr_rounds = 12
prob_prior_decay_rate = 0.8 # was 0.95
prob_prior = snpla.calc_prob_prior(nbr_rounds, prob_prior_decay_rate)
print(prob_prior)
nbr_lik = [2000 for _ in range(nbr_rounds)] # [1000, 1000, 1000, 1000, 1000] # , 2000, 2000]
nbr_epochs_lik = [100 for _ in range(nbr_rounds)] # [100, 100, 100, 100, 100]
batch_size = 50
batch_size_post = 2000
nbr_post = [10000 for _ in range(nbr_rounds)] # [10000, 10000, 10000, 10000, 10000] # , 10000, 10000]
nbr_epochs_post = [50 for _ in range(nbr_rounds)] # [50, 50, 50, 50, 50, 50]
#print("----------------")
#print(model.prior.low)
#print(flow_post.sample(1000, context=s_x_o).min(dim=1))
#print("---")
#print(model.prior.high)
#print(flow_post.sample(1000, context=s_x_o).max(dim=1))
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
prior_samples = model.prior.sample(sample_shape=(1,))
print(prior_samples)
print(prior_samples.shape)
data_sets = simulator(prior_samples)
print(prior_samples)
print(data_sets)
print(data_sets.shape)
s_x_o = data_sets
x_o_batch_post = torch.zeros(batch_size_post, 19)
for i in range(batch_size_post):
x_o_batch_post[i, :] = s_x_o
dim_post = nbr_params
start = time.time()
models_lik, models_post = snpla.inference_snpla(flow_lik,
flow_post,
model.prior,
simulator,
optimizer_lik,
optimizer_post,
decay_rate_post,
s_x_o,
x_o_batch_post,
dim_post,
prob_prior,
nbr_lik,
nbr_epochs_lik,
nbr_post,
nbr_epochs_post,
batch_size,
batch_size_post)
end = time.time()
run_time = end - start
L = 5
M = L
K = nbr_params
indications = torch.zeros(K)
post_samples = models_post[-1].sample(M, context=s_x_o)
post_samples = post_samples.reshape((M, K))
for k in range(K):
indications[k] = (post_samples[:, k] < prior_samples[0, k]).sum()
np.savetxt('sbc/ranks_snpla_' + job + '.csv', indications.numpy(), delimiter=",")
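# Hedged usage note (not part of the original script): the saved file holds, for
# one prior draw, how many of the M=5 posterior samples fall below the true
# parameter in each dimension.  Aggregated over many seeds these rank statistics
# feed a simulation-based calibration check - they should look approximately
# uniform on {0, ..., M} if the SNPLA posterior approximation is well calibrated.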
|
the-stack_0_8467 | #### Training agent in Pusher7Dof gym env using a single real-world env
## Written by : leopauly | [email protected]
## Courtesy for DDPG implementation : Steven Spielberg Pon Kumar (github.com/stevenpjg)
####
##Imports
import gym
from gym.spaces import Box, Discrete
import numpy as np
np.set_printoptions(suppress=True)
import cv2
from ddpg import DDPG
from ou_noise import OUNoise
import matplotlib.pyplot as plt
import scipy.misc as misc
## Imports for DNN
import os
from threading import Thread, Lock
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
import PIL.Image as Image
import random
import numpy as np
import cv2
import time
import math
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras import backend as K
## Custom scripts
import lscript as lsp
import modelling as md
## Defining env
env = gym.make('Pusher7DOF-v1')
assert isinstance(env.observation_space, Box), "observation space must be continuous"
assert isinstance(env.action_space, Box), "action space must be continuous"
## Defining vars for reinfrocement learning algo
num_episodes=200
num_rollouts=20 # Each rollout represents a complete activity: pushing an object, reaching to a point, or similar
steps=16 # Number of actions taken in a rollout
is_batch_norm = False #batch normalization switch
xrange=range # For python3
start_training=64 # Buffer size, before starting to train the RL algorithm
## vars for feature extraction
height=112
width=112
channel=3
crop_size=112
cluster_length=16 # Length of one activity
nb_classes=2
feature_size=4608 #8192 #16384 #487
#frame_feature_size=
saved_path='/home/ironman/trained_activity_nets/'
demo_folder='./Demo_reach_1/'
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
## FRAME FEATURE EXTRACTION
def frame_feature_extractor(frame_):
frame= preprocess(frame_)
frame=frame.reshape(-1,height,width,channel)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False)) as sess:
with tf.device('/cpu:0'):
base_model=keras.applications.vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(height,width,channel), pooling=None, classes=1000)
#base_model=md.get_vgg16_imagenet(summary=True,include_fc=False)
frame_features=base_model.predict(frame)
return frame_features
def preprocess(im):
im = np.float32(im)
im[:,:,2] -= 103.939
im[:,:,1] -= 116.779
im[:,:,0] -= 123.68
im = im[:, :, ::-1] # change to BGR
return im
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
### DEMO FEATURE EXTRACTION
def get_compress_frames_data(filename, num_frames_per_clip=cluster_length):
ret_arr = []
for parent, dirnames, filenames in os.walk(filename):
filenames = sorted(filenames)
jump=math.floor((len(filenames)/num_frames_per_clip))
loop=0
for i in range(0,len(filenames),jump):
if (loop>15):
break
if (filenames[i].endswith('.png')):
image_name = str(filename) + '/' + str(filenames[i])
img = Image.open(image_name)
img_data = np.array(img)
ret_arr.append(img_data)
loop=loop+1
ret_arr=np.array(ret_arr)
#ret_arr=ret_arr/255
return ret_arr
def demo_feature_extractor(demo_vid_path):
demo_vid_array=get_compress_frames_data(demo_vid_path)
return feature_extractor(demo_vid_array)
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
### VIDEO FEATURE EXTRACTION
## Defining placeholders in tf for images and targets
x_image = tf.placeholder(tf.float32, [None, 16,height,width,channel],name='x')
y_true = tf.placeholder(tf.float32, [None, nb_classes],name='y_true')
y_true_cls = tf.placeholder(tf.int64, [None],name='y_true_cls')
model_keras = md.C3D_ucf101_training_model_tf(summary=True)
out=model_keras(x_image)
y_pred = tf.nn.softmax(out)
y_pred_cls = tf.argmax(out, dimension=1)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Loading netwrok framework finished..!!',flush=True)
## Start the session with logging placement.
init_op = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
sess.run(init_op)
## Restore model weights from previously saved model
saver = tf.train.import_meta_graph(os.path.join(saved_path,'activity_model.ckpt-104.meta'))
saver.restore(sess, os.path.join(saved_path,'activity_model.ckpt-104'))
print("Model restored from file: %s" % saved_path,flush=True)
## For extracting activity features
def feature_extractor(vid_np):
#print('shape of video for feature extraction:',vid_np.shape)
vid_=vid_np.reshape(-1,cluster_length,height,width,channel)
#print(tf.contrib.graph_editor.get_tensors(tf.get_default_graph()))
#print(tf.get_default_graph().as_graph_def())
f_v = sess.graph.get_tensor_by_name('flatten_1/Reshape:0')
f_v_val=np.array(sess.run([f_v], feed_dict={'conv1_input:0':vid_,x_image:vid_,K.learning_phase(): 0 }))
#print('extracted video features shape:',f_v_val.shape)
features=np.reshape(f_v_val,(-1))
#print('features_shape',features.shape)
return features
def distance(f_demo,f_robo):
#print('shape f_demo',f_demo.shape,'shape f_demo',f_robo.shape)
return np.linalg.norm(f_demo-f_robo)
def s2l():
#Randomly initialize critic,actor,target critic, target actor network and replay buffer
num_states = feature_size #num_states = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
print ("Number of States:", num_states)
print ("Number of Actions:", num_actions)
agent = DDPG(env, is_batch_norm,num_states,num_actions)
exploration_noise = OUNoise(env.action_space.shape[0])
counter=0
total_reward=0
print ("Number of Rollouts per episode:", num_rollouts)
print ("Number of Steps per roll out:", steps)
reward_st = np.array([0]) #saving reward
reward_st_all = np.array([0]) #saving reward after every step
demo_features=demo_feature_extractor(demo_folder)
for episode in range(num_episodes):
print ("==== Starting episode no:",episode,"====","\n")
env.reset() # Reset env in the begining of each episode
env.render()
obs_img=env.render(mode='rgb_array') # Get the observation
obs_img=np.array(misc.imresize(obs_img,[112,112,3]))
observation =np.array(frame_feature_extractor(obs_img))
observation=observation.reshape(-1)
reward_per_episode = 0
for t in range(num_rollouts):
reward_per_rollout=0
vid_robo_=[]
for i in range(steps):
x = observation
action = agent.evaluate_actor(np.reshape(x,[1,num_states]))
noise = exploration_noise.noise()
action = action[0] + noise #Select action according to current policy and exploration noise
print ('Action at episode-',episode,'rollout-',t, 'step-', i ," :",action)
_,_,done,info=env.step(action)
env.render()
obs_robo_=env.render(mode='rgb_array') # Get the observation
obs_robo=misc.imresize(obs_robo_,[112,112,3])
vid_robo_.append(obs_robo)
observation=np.array(frame_feature_extractor(np.array(obs_robo)))
observation=observation.reshape(-1)
                #pause()
if(i==15):
vid_robo=np.array(vid_robo_)
robo_features=feature_extractor(vid_robo)
reward=-(distance(demo_features,robo_features))
reward=np.array(reward)
print('reward: ',reward)
else:
reward=0
reward=np.array(reward)
print('reward: ',reward)
reward_st_all = np.append(reward_st_all,reward)
np.savetxt('reward_all.txt',reward_st_all, newline="\n")
#add s_t,s_t+1,action,reward to experience memory
print('x','observation',x.shape,observation.shape)
agent.add_experience(x,observation,action,reward,False)
reward_per_rollout+=reward
counter+=1
#train critic and actor network
if counter > start_training:
agent.train()
print ('\n\n')
reward_per_episode+=reward_per_rollout
#check if episode ends:
print ('EPISODE: ',episode,' Total Reward: ',reward_per_episode)
print ("Printing reward to file")
exploration_noise.reset() #reinitializing random noise for action exploration
reward_st = np.append(reward_st,reward_per_episode)
np.savetxt('episode_reward.txt',reward_st, fmt='%f', newline="\n")
print ('\n\n')
total_reward+=reward_per_episode
print ("Average reward per episode {}".format(total_reward / num_episodes))
s2l()
|
the-stack_0_8468 | # -*- coding: utf-8 -*-
'''
Utils for making various web calls. Primarily designed for REST, SOAP, webhooks
and the like, but also useful for basic HTTP testing.
.. versionadded:: 2015.2
'''
from __future__ import absolute_import
# Import python libs
import pprint
import os.path
import json
import logging
# pylint: disable=no-name-in-module
import salt.ext.six.moves.http_cookiejar
import salt.ext.six.moves.urllib.request as urllib_request
# pylint: enable=no-name-in-module
from salt.ext.six import string_types
from salt._compat import ElementTree as ET
import ssl
try:
from ssl import CertificateError # pylint: disable=E0611
from ssl import match_hostname # pylint: disable=E0611
HAS_MATCHHOSTNAME = True
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
HAS_MATCHHOSTNAME = True
except ImportError:
try:
from salt.ext.ssl_match_hostname import CertificateError
from salt.ext.ssl_match_hostname import match_hostname
HAS_MATCHHOSTNAME = True
except ImportError:
HAS_MATCHHOSTNAME = False
import socket
# Import salt libs
import salt.utils
import salt.utils.xmlutil as xml
import salt.loader
import salt.config
import salt.version
from salt.template import compile_template
from salt import syspaths
import salt.ext.six.moves.http_client # pylint: disable=no-name-in-module
# Import 3rd party libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
import msgpack
HAS_MSGPACK = True
except ImportError:
HAS_MSGPACK = False
try:
import certifi
HAS_CERTIFI = True
except ImportError:
HAS_CERTIFI = False
log = logging.getLogger(__name__)
JARFILE = os.path.join(syspaths.CACHE_DIR, 'cookies.txt')
SESSIONJARFILE = os.path.join(syspaths.CACHE_DIR, 'cookies.session.p')
USERAGENT = 'Salt/{0}'.format(salt.version.__version__)
def query(url,
method='GET',
params=None,
data=None,
data_file=None,
header_dict=None,
header_list=None,
header_file=None,
username=None,
password=None,
auth=None,
decode=False,
decode_type='auto',
status=False,
headers=False,
text=False,
cookies=None,
cookie_jar=JARFILE,
cookie_format='lwp',
persist_session=False,
session_cookie_jar=SESSIONJARFILE,
data_render=False,
data_renderer=None,
header_render=False,
header_renderer=None,
template_dict=None,
test=False,
test_url=None,
node='minion',
port=80,
opts=None,
requests_lib=None,
ca_bundle=None,
verify_ssl=None,
cert=None,
text_out=None,
headers_out=None,
decode_out=None,
stream=False,
handle=False,
agent=USERAGENT,
**kwargs):
'''
Query a resource, and decode the return data
'''
ret = {}
if opts is None:
if node == 'master':
opts = salt.config.master_config(
os.path.join(syspaths.CONFIG_DIR, 'master')
)
elif node == 'minion':
opts = salt.config.minion_config(
os.path.join(syspaths.CONFIG_DIR, 'minion')
)
else:
opts = {}
if requests_lib is None:
requests_lib = opts.get('requests_lib', False)
if requests_lib is True:
if HAS_REQUESTS is False:
ret['error'] = ('http.query has been set to use requests, but the '
'requests library does not seem to be installed')
log.error(ret['error'])
return ret
else:
requests_log = logging.getLogger('requests')
requests_log.setLevel(logging.WARNING)
if ca_bundle is None:
ca_bundle = get_ca_bundle(opts)
if verify_ssl is None:
verify_ssl = opts.get('verify_ssl', True)
if cert is None:
cert = opts.get('cert', None)
if data_file is not None:
data = _render(
data_file, data_render, data_renderer, template_dict, opts
)
log.debug('Using {0} Method'.format(method))
if method == 'POST':
log.trace('POST Data: {0}'.format(pprint.pformat(data)))
if header_file is not None:
header_tpl = _render(
header_file, header_render, header_renderer, template_dict, opts
)
if isinstance(header_tpl, dict):
header_dict = header_tpl
else:
header_list = header_tpl.splitlines()
if header_dict is None:
header_dict = {}
if header_list is None:
header_list = []
if persist_session is True and HAS_MSGPACK:
# TODO: This is hackish; it will overwrite the session cookie jar with
# all cookies from this one connection, rather than behaving like a
# proper cookie jar. Unfortunately, since session cookies do not
# contain expirations, they can't be stored in a proper cookie jar.
if os.path.isfile(session_cookie_jar):
with salt.utils.fopen(session_cookie_jar, 'r') as fh_:
session_cookies = msgpack.load(fh_)
if isinstance(session_cookies, dict):
header_dict.update(session_cookies)
else:
with salt.utils.fopen(session_cookie_jar, 'w') as fh_:
msgpack.dump('', fh_)
for header in header_list:
comps = header.split(':')
if len(comps) < 2:
continue
header_dict[comps[0].strip()] = comps[1].strip()
if username and password:
auth = (username, password)
else:
auth = None
if requests_lib is True:
sess = requests.Session()
sess.auth = auth
sess.headers.update(header_dict)
log.trace('Request Headers: {0}'.format(sess.headers))
sess_cookies = sess.cookies
sess.verify = verify_ssl
else:
sess_cookies = None
if cookies is not None:
if cookie_format == 'mozilla':
sess_cookies = salt.ext.six.moves.http_cookiejar.MozillaCookieJar(cookie_jar)
else:
sess_cookies = salt.ext.six.moves.http_cookiejar.LWPCookieJar(cookie_jar)
if not os.path.isfile(cookie_jar):
sess_cookies.save()
else:
sess_cookies.load()
if agent == USERAGENT:
agent = '{0} http.query()'.format(agent)
header_dict['User-agent'] = agent
if test is True:
if test_url is None:
return {}
else:
url = test_url
ret['test'] = True
if requests_lib is True:
req_kwargs = {}
if stream is True:
if requests.__version__[0] == '0':
# 'stream' was called 'prefetch' before 1.0, with flipped meaning
req_kwargs['prefetch'] = False
else:
req_kwargs['stream'] = True
# Client-side cert handling
if cert is not None:
if isinstance(cert, string_types):
if os.path.exists(cert):
req_kwargs['cert'] = cert
elif isinstance(cert, tuple):
if os.path.exists(cert[0]) and os.path.exists(cert[1]):
req_kwargs['cert'] = cert
else:
log.error('The client-side certificate path that was passed is '
'not valid: {0}'.format(cert))
result = sess.request(
method, url, params=params, data=data, **req_kwargs
)
result.raise_for_status()
if stream is True or handle is True:
return {'handle': result}
result_status_code = result.status_code
result_headers = result.headers
result_text = result.text
result_cookies = result.cookies
else:
request = urllib_request.Request(url, data)
handlers = [
urllib_request.HTTPHandler,
urllib_request.HTTPCookieProcessor(sess_cookies)
]
if url.startswith('https') or port == 443:
if not HAS_MATCHHOSTNAME:
log.warn(('match_hostname() not available, SSL hostname checking '
'not available. THIS CONNECTION MAY NOT BE SECURE!'))
elif verify_ssl is False:
log.warn(('SSL certificate verification has been explicitly '
'disabled. THIS CONNECTION MAY NOT BE SECURE!'))
else:
hostname = request.get_host()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, 443))
sockwrap = ssl.wrap_socket(
sock,
ca_certs=ca_bundle,
cert_reqs=ssl.CERT_REQUIRED
)
try:
match_hostname(sockwrap.getpeercert(), hostname)
except CertificateError as exc:
ret['error'] = (
'The certificate was invalid. '
'Error returned was: {0}'.format(
pprint.pformat(exc)
)
)
return ret
# Client-side cert handling
if cert is not None:
cert_chain = None
if isinstance(cert, string_types):
if os.path.exists(cert):
                        cert_chain = (cert,)
elif isinstance(cert, tuple):
if os.path.exists(cert[0]) and os.path.exists(cert[1]):
cert_chain = cert
else:
log.error('The client-side certificate path that was '
'passed is not valid: {0}'.format(cert))
return
if hasattr(ssl, 'SSLContext'):
# Python >= 2.7.9
                    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                    context.load_cert_chain(*cert_chain)
handlers.append(urllib_request.HTTPSHandler(context=context)) # pylint: disable=E1123
else:
# Python < 2.7.9
cert_kwargs = {
'host': request.get_host(),
'port': port,
'cert_file': cert_chain[0]
}
if len(cert_chain) > 1:
cert_kwargs['key_file'] = cert_chain[1]
handlers[0] = salt.ext.six.moves.http_client.HTTPSConnection(**cert_kwargs)
opener = urllib_request.build_opener(*handlers)
for header in header_dict:
request.add_header(header, header_dict[header])
request.get_method = lambda: method
result = opener.open(request)
if stream is True or handle is True:
return {'handle': result}
result_status_code = result.code
result_headers = result.headers.headers
result_text = result.read()
if isinstance(result_headers, list):
result_headers_dict = {}
for header in result_headers:
comps = header.split(':')
result_headers_dict[comps[0].strip()] = ':'.join(comps[1:]).strip()
result_headers = result_headers_dict
log.debug('Response Status Code: {0}'.format(result_status_code))
log.trace('Response Headers: {0}'.format(result_headers))
log.trace('Response Cookies: {0}'.format(sess_cookies))
try:
log.trace('Response Text: {0}'.format(result_text))
except UnicodeEncodeError as exc:
log.trace(('Cannot Trace Log Response Text: {0}. This may be due to '
'incompatibilities between requests and logging.').format(exc))
if text_out is not None and os.path.exists(text_out):
with salt.utils.fopen(text_out, 'w') as tof:
tof.write(result_text)
if headers_out is not None and os.path.exists(headers_out):
with salt.utils.fopen(headers_out, 'w') as hof:
hof.write(result_headers)
if cookies is not None:
sess_cookies.save()
if persist_session is True and HAS_MSGPACK:
# TODO: See persist_session above
if 'set-cookie' in result_headers:
with salt.utils.fopen(session_cookie_jar, 'w') as fh_:
session_cookies = result_headers.get('set-cookie', None)
if session_cookies is not None:
msgpack.dump({'Cookie': session_cookies}, fh_)
else:
msgpack.dump('', fh_)
if status is True:
ret['status'] = result_status_code
if headers is True:
ret['headers'] = result_headers
if decode is True:
if decode_type == 'auto':
content_type = result_headers.get(
'content-type', 'application/json'
)
if 'xml' in content_type:
decode_type = 'xml'
elif 'json' in content_type:
decode_type = 'json'
else:
decode_type = 'plain'
valid_decodes = ('json', 'xml', 'plain')
if decode_type not in valid_decodes:
ret['error'] = (
'Invalid decode_type specified. '
'Valid decode types are: {0}'.format(
pprint.pformat(valid_decodes)
)
)
log.error(ret['error'])
return ret
if decode_type == 'json':
ret['dict'] = json.loads(result_text)
elif decode_type == 'xml':
ret['dict'] = []
items = ET.fromstring(result_text)
for item in items:
ret['dict'].append(xml.to_dict(item))
else:
text = True
if decode_out and os.path.exists(decode_out):
with salt.utils.fopen(decode_out, 'w') as dof:
dof.write(result_text)
if text is True:
ret['text'] = result_text
return ret
def get_ca_bundle(opts=None):
'''
Return the location of the ca bundle file. See the following article:
http://tinyurl.com/k7rx42a
'''
if hasattr(get_ca_bundle, '__return_value__'):
return get_ca_bundle.__return_value__
if opts is None:
opts = {}
opts_bundle = opts.get('ca_bundle', None)
if opts_bundle is not None and os.path.exists(opts_bundle):
return opts_bundle
file_roots = opts.get('file_roots', {'base': [syspaths.SRV_ROOT_DIR]})
salt_root = file_roots['base'][0]
log.debug('file_roots is {0}'.format(salt_root))
# Please do not change the order without good reason
for path in (
# Check Salt first
os.path.join(salt_root, 'cacert.pem'),
os.path.join(salt_root, 'ca-bundle.crt'),
# Debian has paths that often exist on other distros
'/etc/ssl/certs/ca-certificates.crt',
# RedHat is also very common
'/etc/pki/tls/certs/ca-bundle.crt',
'/etc/pki/tls/certs/ca-bundle.trust.crt',
        # RedHat's link for Debian compatibility
'/etc/ssl/certs/ca-bundle.crt',
# Suse has an unusual path
'/var/lib/ca-certificates/ca-bundle.pem',
):
if os.path.exists(path):
return path
if salt.utils.is_windows() and HAS_CERTIFI:
return certifi.where()
return None
def update_ca_bundle(
target=None,
source=None,
opts=None,
merge_files=None,
):
'''
Attempt to update the CA bundle file from a URL
If not specified, the local location on disk (``target``) will be
auto-detected, if possible. If it is not found, then a new location on disk
will be created and updated.
The default ``source`` is:
http://curl.haxx.se/ca/cacert.pem
This is based on the information at:
http://curl.haxx.se/docs/caextract.html
A string or list of strings representing files to be appended to the end of
the CA bundle file may also be passed through as ``merge_files``.
'''
if opts is None:
opts = {}
if target is None:
target = get_ca_bundle(opts)
if target is None:
log.error('Unable to detect location to write CA bundle to')
return
if source is None:
source = opts.get('ca_bundle_url', 'http://curl.haxx.se/ca/cacert.pem')
log.debug('Attempting to download {0} to {1}'.format(source, target))
query(
source,
text=True,
decode=False,
headers=False,
status=False,
text_out=target
)
if merge_files is not None:
if isinstance(merge_files, string_types):
merge_files = [merge_files]
if not isinstance(merge_files, list):
log.error('A value was passed as merge_files which was not either '
'a string or a list')
return
merge_content = ''
for cert_file in merge_files:
if os.path.exists(cert_file):
log.debug(
'Queueing up {0} to be appended to {1}'.format(
cert_file, target
)
)
try:
with salt.utils.fopen(cert_file, 'r') as fcf:
merge_content = '\n'.join((merge_content, fcf.read()))
except IOError as exc:
log.error(
'Reading from {0} caused the following error: {1}'.format(
cert_file, exc
)
)
if merge_content:
log.debug('Appending merge_files to {0}'.format(target))
try:
with salt.utils.fopen(target, 'a') as tfp:
tfp.write('\n')
tfp.write(merge_content)
except IOError as exc:
log.error(
'Writing to {0} caused the following error: {1}'.format(
target, exc
)
)
def _render(template, render, renderer, template_dict, opts):
'''
Render a template
'''
if render:
if template_dict is None:
template_dict = {}
if not renderer:
renderer = opts.get('renderer', 'yaml_jinja')
rend = salt.loader.render(opts, {})
return compile_template(template, rend, renderer, **template_dict)
with salt.utils.fopen(template, 'r') as fh_:
return fh_.read()
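# Hedged usage sketch (not part of the original module): one plausible call of
# query() against a JSON endpoint; the URL is a placeholder.
#
#     result = query(
#         'https://example.com/api/items',
#         method='GET',
#         decode=True,
#         decode_type='json',
#         status=True,
#     )
#     # On success: result['status'] and result['dict'] (the decoded body);
#     # on failure: result['error'] describes what went wrong.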
|
the-stack_0_8469 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libbytesize(AutotoolsPackage):
"""The goal of this project is to provide a tiny library that would
facilitate the common operations with sizes in bytes."""
homepage = "https://github.com/storaged-project/libbytesize"
url = "https://github.com/storaged-project/libbytesize/releases/download/2.4/libbytesize-2.4.tar.gz"
version('2.4', sha256='25ccb5762bb8c860b63ed1d40e0c5564e3e0084693fbe6554467a8ca1c1d8c7f')
version('2.3', sha256='3c74113fc8cd1a2fbd8870fa0ed7cef2ef24d60ef91e7145fbc041f9aa144479')
version('2.2', sha256='b93c54b502880c095c9f5767a42464853e2687db2e5e3084908a615bafe73baa')
extends('python')
depends_on('pcre2')
depends_on('gmp')
depends_on('mpfr')
|
the-stack_0_8470 | import logging
import mimetypes
import time
from typing import Iterator, Callable
from urllib.parse import urlparse
import pymongo
from bson import ObjectId
from requests import HTTPError
from tsing_spider.porn.caoliu import CaoliuIndexPage, CaoliuThread
from ghs.spiders.base import BaseSpiderTaskGenerator
from ghs.utils.storage import create_s3_client, url_to_s3, create_mongodb_client, put_json
log = logging.getLogger(__file__)
mongodb_client = create_mongodb_client()
s3_client = create_s3_client()
collection = mongodb_client.get_database("resman").get_collection("spider_t66y")
def initialize():
"""
Initialize mongodb and s3
:return:
"""
log.info("Initializing database")
collection.create_index([("published", pymongo.ASCENDING)])
collection.create_index([("url", pymongo.ASCENDING)])
def thread_item_processor(caoliu_thread: CaoliuThread):
def wrapper():
if collection.find_one({"url": caoliu_thread.url}) is None:
_id = ObjectId()
data = dict(
_id=_id,
published=False,
url=caoliu_thread.url,
tid=caoliu_thread.tid,
title=caoliu_thread.title,
image_list=caoliu_thread.image_list,
comments=caoliu_thread.comments,
content_text=caoliu_thread.content_text
)
image_wrote_count = 0
for i, image_url in enumerate(caoliu_thread.image_list):
log.debug(f"Downloading image {i} for page {caoliu_thread.url}")
url_path = urlparse(image_url).path
mime_type = mimetypes.guess_type(url_path)[0]
file_suffix = url_path.split(".")[-1]
s3_path = f"t66y/{str(_id)}/images/{i}.{file_suffix}"
if url_to_s3(
s3_client,
image_url,
s3_path,
headers={"Referer": caoliu_thread.url},
content_type=mime_type,
ignore_4xx=True
):
image_wrote_count += 1
data["all_images_wrote"] = image_wrote_count >= len(caoliu_thread.image_list)
put_json(s3_client, data, f"t66y/{str(_id)}/meta.json")
collection.insert_one(data)
log.info(f"{caoliu_thread.url} already processed successfully.")
return wrapper
class CaoliuSpiderTaskGenerator(BaseSpiderTaskGenerator):
def __init__(self, max_page_index: int):
self.max_page_index = max_page_index
def generate(self) -> Iterator[Callable[[None], None]]:
initialize()
submitted_tasks = set()
page_index = 0
errors_remain = 3
while errors_remain > 0:
page_index += 1
if page_index > self.max_page_index:
break
base_page = CaoliuIndexPage(page_index)
log.info(f"Reading {base_page.url}.")
for i in range(errors_remain):
time.sleep(5.0)
try:
for thread_item in base_page.threads:
if thread_item.url not in submitted_tasks:
submitted_tasks.add(thread_item.url)
yield thread_item_processor(thread_item)
break
except HTTPError as he:
if he.response.status_code == 404:
errors_remain -= 1
else:
log.error(f"HTTP Error while reading {base_page.url}.", exc_info=he)
except Exception as ex:
log.error(f"Error while reading {base_page.url}.", exc_info=ex)
|