code | apis | extract_api
---|---|---
import pandas as pd
import numpy as np
import csv
import urllib.request
import json
from datetime import datetime
from datetime import timedelta
from sklearn.preprocessing import MinMaxScaler
import web_scrapers
import os
def load_real_estate_data(filename, state_attr, state):
df = pd.read_csv(filename, encoding="ISO-8859-1")
df = df.loc[df[state_attr] == state]
return df
def load_data(filenames):
df_list=[]
for i in range(0, len(filenames)):
df = pd.read_csv(filenames[i], encoding="ISO-8859-1")
df_list.append(df)
return df_list
def create_zipcode_list(filenames):
    zipcodes = {}  # maps each zipcode to ([zipcodes within 5 miles], [zipcodes 5-10 miles away]); filled in later
zip_list = []
for i in range(0, len(filenames)):
with open(filenames[i], 'r', encoding='utf-8-sig') as f:
reader = csv.reader(f)
your_list = list(reader)
for z in range(0, len(your_list)):
zipcodes[your_list[z][0]] = [], []
zip_list.append(your_list[z][0])
return zipcodes, zip_list
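# Shape of the returned structures, with hypothetical zipcodes for illustration only:
#   zipcodes == {'94105': ([], []), '95014': ([], [])}   # each value later becomes ([zips within 5 mi], [zips 5-10 mi])
#   zip_list == ['94105', '95014']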
def wrangle_real_estate_data(df, zip_list, drop_columns):
df = df[df['RegionName'].isin(zip_list)]
df = df.drop(drop_columns, axis=1)
return df
def wrangle_IPO_data(df, zip_list):
df['Date Filed'] = pd.to_datetime(df['Date Filed'], format='%Y-%m-%d')
df['Lockup Expiration Date'] = pd.to_datetime(df['Lockup Expiration Date'], errors='coerce', format='%m/%d/%Y')
df = df[df['Zipcode'].isin(zip_list)]
df = df.drop(['Lockup Expiration Date'], axis=1)
df['Lockup Expiration Date'] = df['Date Filed'] + timedelta(days=180)
df = df[df['Date Filed']> df['Date Filed'].min()+ timedelta(days=366)]
return df
def wrangle_census_data(df_census_econ, df_census_dem, zip_list, census_econ_columns, census_dem_columns):
df_census_econ.rename(columns={'Id2': 'Zipcode'}, inplace=True)
df_census_econ.rename(
columns={'Percent; EMPLOYMENT STATUS - Civilian labor force - Unemployment Rate': 'Unemployment Rate'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Less than $10,000': 'l10000'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $10,000 to $14,999': 'l15000'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $15,000 to $24,999': 'l25000'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; COMMUTING TO WORK - Mean travel time to work (minutes)': 'Mean Travel Time to Work Estimate (minutes)'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $200,000 or more': 'Percent of Households with Income Greater than $200,000'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Median household income (dollars)': 'Median Household Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Mean household income (dollars)': 'Mean Household Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Per capita income (dollars)': 'Per Capita Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Percent; HEALTH INSURANCE COVERAGE - Civilian noninstitutionalized population - No health insurance coverage': 'Percent of Population with no Health Insurance Coverage'},
inplace=True)
df_census_econ.rename(columns={
'Percent; PERCENTAGE OF FAMILIES AND PEOPLE WHOSE INCOME IN THE PAST 12 MONTHS IS BELOW THE POVERTY LEVEL - All people': 'Percent of People whose Income in the Past 12 months has been Below Poverty Level'},
inplace=True)
df_census_econ['l10000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l10000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l10000'] = df_census_econ['l10000'].astype(float)
df_census_econ['l15000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l15000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l15000'] = df_census_econ['l15000'].astype(float)
df_census_econ['l25000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l25000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l25000'] = df_census_econ['l25000'].astype(float)
df_census_econ["Percent of Households With Income Less Than $24,999"] = df_census_econ['l10000'] + df_census_econ[
'l15000'] + df_census_econ['l25000']
df_census_econ = df_census_econ.filter(census_econ_columns)
df_census_dem.rename(columns={'Id2': 'Zipcode'}, inplace=True)
df_census_dem.rename(columns={'Estimate; SEX AND AGE - Median age (years)': 'Median Age'}, inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - Under 18 years': 'Percent of People under 18 years of age'},
inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 65 years and over': 'Percent of People 65 years and over'},
inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 18 years and over - Male': 'Percent of Males'}, inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 18 years and over - Female': 'Percent of Females'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Hispanic or Latino (of any race)': 'Percent of People who are Hispanic'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - White alone': 'Percent of People who are White'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Black or African American alone': 'Percent of People who are Black or African American'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Asian alone': 'Percent of People who are Asian'},
inplace=True)
df_census_dem = df_census_dem.filter(census_dem_columns)
# filter data to only Silicon Valley + San Francisco Zip Codes
df_census_dem = df_census_dem[df_census_dem['Zipcode'].isin(zip_list)]
df_census_econ = df_census_econ[df_census_econ['Zipcode'].isin(zip_list)]
return df_census_econ, df_census_dem
def wrangle_real_estate_headers(df):
'''
run before joining dataframes so keys match
df_sale_counts_by_zip_silicon_valley.columns = df_sale_counts_by_zip_silicon_valley.columns.str.replace('Sales Counts ', '')
df_sale_counts_by_zip_silicon_valley = df_sale_counts_by_zip_silicon_valley.add_prefix('Sales Counts ')
df_sale_counts_by_zip_silicon_valley.rename(columns = {'Sales Counts RegionName':'Zipcode'}, inplace=True)
'''
df.columns = df.columns.str.replace('All Homes ', '')
df = df.add_prefix('All Homes ')
df.rename(columns={'All Homes RegionName': 'Zipcode'}, inplace=True)
return df
def wrangle_ipo_headers(df):
df.rename(columns={'Ticker': 'Symbol'}, inplace=True)
df["Found"] = df["Found"].astype(dtype=np.int64)
return df
def join_data(df1, df2, key, join_type):
df = df1.set_index(key).merge(df2, on=key, how=join_type)
return df
def merge_data(df1, df2, key):
df = pd.merge(df1, df2, on=key, how='inner')
return df
def df_replace(df, replace_list):
for i in range(0, len(replace_list)):
df = df.replace([replace_list[i]], [''], regex=True)
return df
def drop_columns_and_nans(df, drop_columns, nan_columns):
    df = df.drop(drop_columns, axis=1)
    for i in range(0, len(nan_columns)):
        # keep only the first occurrence of each value in the given key column
        df.drop_duplicates(subset=nan_columns[i], keep='first', inplace=True)
return df
def calculate_distance_between_zips(zipcode, min_radius, max_radius):
# api-endpoint
URL_base = "https://api.zip-codes.com/ZipCodesAPI.svc/1.0/FindZipCodesInRadius?zipcode="
URL = URL_base + zipcode + '&minimumradius=' + min_radius + '&maximumradius=' + max_radius + '&key=<KEY>'
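    # Assumed shape of the API response, inferred from the parsing below (not verified against the live service):
    #   {"DataList": [{"Code": "94105", ...}, {"Code": "94107", ...}, ...]}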
    # send the GET request and parse the JSON response once
    contents = urllib.request.urlopen(URL).read()
    response = json.loads(contents)
    print(response)
    zipcodes_nearby = []
    # skip the first entry of 'DataList' (index 0) and collect the nearby zipcodes
    for i in range(1, len(response['DataList'])):
        zipcodes_nearby.append(response['DataList'][i]['Code'])
return zipcodes_nearby
def create_zipcode_distances_dictionary(zipcodes, zip_list):
'''
    ***DON'T RUN IF THESE ARE ALREADY CREATED***
currently stored as data/processed/zipcodes_within_radius.txt
'''
print(len(zip_list))
for i in range(0, len(zip_list)):
zipcodes[zip_list[i]] = calculate_distance_between_zips(zip_list[i], '0', '5'), calculate_distance_between_zips(
zip_list[i], '5', '10')
return zipcodes
def create_text_file_from_dictionary(filename, dictionary):
'''
with open('data/processed/zipcodes_within_radius.txt', 'w') as json_file:
json.dump(zipcodes, json_file)
'''
with open(filename, 'w') as json_file:
json.dump(dictionary, json_file)
return dictionary
def export_dataframe_to_dictionary(df, name):
filename = 'data/processed/' + name + '.csv'
    df.to_csv(filename, index=True, header=True)
def update_zipcodes_dict(zipcodes, zip_list):
exists = os.path.isfile('../data/processed/zipcodes_within_radius.txt')
if not exists:
zipcodes = create_zipcode_distances_dictionary(zipcodes, zip_list)
create_text_file_from_dictionary('../data/processed/zipcodes_within_radius.txt', zipcodes)
else:
zipcodes = {}
with open('../data/processed/zipcodes_within_radius.txt', 'r') as f:
zipcodes = json.load(f)
return zipcodes
def create_IPO_an_Zipcode_dataframe(census_econ_cols, census_dem_cols, df_ipo, df_zip, zipcodes):
if 'Zipcode' in census_econ_cols:
census_econ_cols.remove('Zipcode')
if 'Zipcode' in census_dem_cols:
census_dem_cols.remove('Zipcode')
ipo_header_list = list(df_ipo.columns.values) +census_dem_cols+census_econ_cols + ['All Homes Date Filed',
'All Homes Lockup Expiration Date',
'All Homes 1 Year Before Date Filed',
'All Homes 2 Years After Date Filed']
'''
Distance from IPO = estimate is .2 if in the same zipcode as IPO
= estimate is 0.5 if not in same zip code as IPO and less than 5 miles from zipcode to IPO
= estimate is 1 if greater than 5 and less than 10 miles from zipcode to IPO
'''
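    # Illustration with hypothetical zipcodes: for an IPO filed from 94105, the 94105 row gets
    # 'Distance to IPO' 0.2, rows for zipcodes within 5 miles get 0.5, and rows 5-10 miles away get 1.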
new_df_list = []
for index, row in df_ipo.iterrows():
ipo_zipcode = str(row['Zipcode'])
zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(ipo_zipcode)]
headerList = join_IPO_and_Zip_Data(row['Date Filed'], row['Lockup Expiration Date'], census_econ_cols,census_dem_cols)
data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None)
dictionary = dict(zip(ipo_header_list, data))
dictionary['Symbol'] = index
dictionary['Distance to IPO'] = .2
dictionary['Zipcode for Distance'] = ipo_zipcode
new_df_list.append(dictionary)
within_5miles = zipcodes[ipo_zipcode][0]
within_10miles = zipcodes[ipo_zipcode][1]
for i in range(0, len(within_5miles)):
zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(within_5miles[i])]
data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None)
dictionary = dict(zip(ipo_header_list, data))
dictionary['Symbol'] = index
dictionary['Distance to IPO'] = .5
dictionary['Zipcode for Distance'] = within_5miles[i]
new_df_list.append(dictionary)
for j in range(0, len(within_10miles)):
zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(within_10miles[j])]
data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None)
dictionary = dict(zip(ipo_header_list, data))
dictionary['Symbol'] = index
dictionary['Distance to IPO'] = 1
dictionary['Zipcode for Distance'] = within_10miles[j]
new_df_list.append(dictionary)
ipo_final_df = pd.DataFrame(new_df_list)
ipo_final_df.dropna(subset=['Median Age'], how='all', inplace=True)
ipo_final_df.dropna(subset=['All Homes Date Filed'], how='all', inplace=True)
return ipo_final_df
def normalize_IPO_an_Zipcode_dataframe(normalization_list, df_ipo):
df_ipo = df_ipo.replace(['--'], [''], regex=True)
df_ipo = df_ipo.replace(r'^\s*$', np.nan, regex=True)
    df_ipo = df_ipo.replace([r'\,'], [''], regex=True)
    df_ipo = df_ipo.replace([r'\+'], [''], regex=True)
scaler = MinMaxScaler()
df_ipo[normalization_list] = scaler.fit_transform(df_ipo[normalization_list])
return df_ipo
def join_IPO_and_Zip_Data(IPO_Date_Filed, IPO_Lockup_Expiration_Date, census_econ_cols, census_dem_cols):
    filtered_columns = census_dem_cols + census_econ_cols  # 'Zipcode' has already been removed from these lists by the caller
ipo_month_filed = IPO_Date_Filed.month
ipo_year_filed = IPO_Date_Filed.year
AllHomes_header_filed = 'All Homes ' + str(ipo_year_filed) + '-' + str(ipo_month_filed).zfill(2)
ipo_month = IPO_Lockup_Expiration_Date.month
ipo_year = IPO_Lockup_Expiration_Date.year
AllHomes_header_lockup = 'All Homes ' + str(ipo_year) + '-' + str(ipo_month).zfill(2)
AllHomes_header_filed_1_yr_ago = 'All Homes ' + str(int(ipo_year_filed) - 1) + '-' + str(ipo_month_filed).zfill(2)
AllHomes_header_filed_2_yr = 'All Homes ' + str(int(ipo_year_filed) + 2) + '-' + str(ipo_month_filed).zfill(2)
filtered_columns = filtered_columns + [AllHomes_header_filed, AllHomes_header_lockup,
AllHomes_header_filed_1_yr_ago,
AllHomes_header_filed_2_yr]
return filtered_columns
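# Example of the home-price columns produced above (hypothetical dates): an IPO filed 2015-06 with a lockup
# expiring 2015-12 yields 'All Homes 2015-06', 'All Homes 2015-12', 'All Homes 2014-06', and 'All Homes 2017-06'.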
def update_ipo_list(year, start_month, end_month):
web_scrapers.add_new_ipo_data_to_csv('../data/processed/1997-04_2019_full_ipo_data.csv', year, start_month, end_month)
df_ipo_list = load_data(['../data/processed/1997-04_2019_full_ipo_data.csv', '../data/raw/ipo_ritter_data.csv'])
zipcodes, zip_list = create_zipcode_list(
['../data/raw/Santa_Clara_County_Zipcodes.csv', '../data/raw/San_Mateo_County_Zipcodes.csv',
'../data/raw/San_Francisco_County_Zipcodes.csv', '../data/raw/Alameda_County_Zipcodes.csv'])
df_ipo = wrangle_IPO_data(df_ipo_list[0], zip_list)
df_ipo_ritter = wrangle_ipo_headers(df_ipo_list[1])
df_ipo = join_data(df_ipo, df_ipo_ritter, 'Symbol', 'left')
df_ipo = drop_columns_and_nans(df_ipo, ['IPO Name', 'Offer date', 'CUSIP', 'PERM'], ['CIK'])
df_ipo.to_csv("../data/processed/df_ipo.csv", index=True)
def main():
df_real_estate = load_real_estate_data('../data/raw/Zip_Zhvi_AllHomes.csv', 'State', 'CA')
    # load all IPO data between 1997 and the present; this data was scraped using code from src/web_scrapers.py
df_ipo_list = load_data(['../data/processed/df_ipo.csv', '../data/raw/ipo_ritter_data.csv'])
df_census_list = load_data(['../data/raw/zip_census_bureau_economic_characteristics_2017.csv', '../data/raw/zip_census_bureau_age_race_2017.csv'])
zipcodes, zip_list = create_zipcode_list(
['../data/raw/Santa_Clara_County_Zipcodes.csv', '../data/raw/San_Mateo_County_Zipcodes.csv',
'../data/raw/San_Francisco_County_Zipcodes.csv', '../data/raw/Alameda_County_Zipcodes.csv'])
df_real_estate = wrangle_real_estate_data(df_real_estate, zip_list,['City', 'State', 'Metro', 'CountyName', 'SizeRank'])
df_ipo = wrangle_IPO_data(df_ipo_list[0], zip_list)
census_econ_columns = ['Zipcode',
'Unemployment Rate',
'Mean Travel Time to Work Estimate (minutes)',
'Percent of Households with Income Greater than $200,000',
'Median Household Income Estimate (dollars)',
'Mean Household Income Estimate (dollars)',
'Per Capita Income Estimate (dollars)',
'Percent of Population with no Health Insurance Coverage',
'Percent of People whose Income in the Past 12 months has been Below Poverty Level',
'Percent of Households With Income Less Than $24,999']
census_dem_columns = ['Zipcode',
'Median Age',
'Percent of People under 18 years of age',
'Percent of People 65 years and over',
'Percent of Males',
'Percent of Females',
'Percent of People who are Hispanic',
'Percent of People who are White',
'Percent of People who are Black or African American',
'Percent of People who are Asian']
df_census_econ, df_census_dem = wrangle_census_data(df_census_list[0], df_census_list[1], zip_list,
census_econ_columns, census_dem_columns)
df_real_estate = wrangle_real_estate_headers(df_real_estate)
df_ipo_ritter = wrangle_ipo_headers(df_ipo_list[1])
df_ipo_ritter = df_ipo_ritter.drop(['Found'], axis=1)
df_census = join_data(df_census_econ, df_census_dem, 'Zipcode', 'inner')
df_zip = merge_data(df_census, df_real_estate, 'Zipcode')
    df_zip = df_replace(df_zip, [r'\+', r'\,'])
print(df_zip['All Homes 2019-05'])
df_ipo = join_data(df_ipo, df_ipo_ritter, 'Symbol', 'left')
df_ipo = drop_columns_and_nans(df_ipo, ['IPO Name', 'Offer date', 'CUSIP', 'PERM'], ['CIK'])
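    # 'Found' comes from the Ritter IPO data and appears to hold the founding year; the next line converts it to company age as of 2019.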
df_ipo['Found'] = 2019.0 - df_ipo['Found']
normalization_list = ['Offer Amount', 'Number of Employees', 'Found', 'Median Age',
'Percent of People under 18 years of age',
'Percent of People 65 years and over',
'Percent of Males',
'Percent of Females',
'Percent of People who are Hispanic',
'Percent of People who are White',
'Percent of People who are Black or African American',
'Percent of People who are Asian',
'Unemployment Rate',
'Mean Travel Time to Work Estimate (minutes)',
'Percent of Households with Income Greater than $200,000',
'Median Household Income Estimate (dollars)',
'Mean Household Income Estimate (dollars)',
'Per Capita Income Estimate (dollars)',
'Percent of Population with no Health Insurance Coverage',
'Percent of People whose Income in the Past 12 months has been Below Poverty Level',
'Percent of Households With Income Less Than $24,999']
zipcodes = update_zipcodes_dict(zipcodes, zip_list)
df_ipo_all = create_IPO_an_Zipcode_dataframe(census_econ_columns, census_dem_columns, df_ipo, df_zip, zipcodes)
df_ipo_all.to_csv("../data/processed/df_ipo_all.csv", index=False)
if __name__ == "__main__":
print("we are wrangling data")
#update_ipo_list(2019, 6, 7)
main()
|
[
"json.loads",
"pandas.read_csv",
"pandas.merge",
"json.dump",
"web_scrapers.add_new_ipo_data_to_csv",
"os.path.isfile",
"json.load",
"numpy.array",
"csv.reader",
"pandas.DataFrame",
"datetime.timedelta",
"sklearn.preprocessing.MinMaxScaler",
"pandas.to_datetime"
] |
[((290, 334), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'encoding': '"""ISO-8859-1"""'}), "(filename, encoding='ISO-8859-1')\n", (301, 334), True, 'import pandas as pd\n'), ((1285, 1336), 'pandas.to_datetime', 'pd.to_datetime', (["df['Date Filed']"], {'format': '"""%Y-%m-%d"""'}), "(df['Date Filed'], format='%Y-%m-%d')\n", (1299, 1336), True, 'import pandas as pd\n'), ((1372, 1457), 'pandas.to_datetime', 'pd.to_datetime', (["df['Lockup Expiration Date']"], {'errors': '"""coerce"""', 'format': '"""%m/%d/%Y"""'}), "(df['Lockup Expiration Date'], errors='coerce', format='%m/%d/%Y'\n )\n", (1386, 1457), True, 'import pandas as pd\n'), ((7975, 8014), 'pandas.merge', 'pd.merge', (['df1', 'df2'], {'on': 'key', 'how': '"""inner"""'}), "(df1, df2, on=key, how='inner')\n", (7983, 8014), True, 'import pandas as pd\n'), ((10121, 10183), 'os.path.isfile', 'os.path.isfile', (['"""../data/processed/zipcodes_within_radius.txt"""'], {}), "('../data/processed/zipcodes_within_radius.txt')\n", (10135, 10183), False, 'import os\n'), ((13340, 13365), 'pandas.DataFrame', 'pd.DataFrame', (['new_df_list'], {}), '(new_df_list)\n', (13352, 13365), True, 'import pandas as pd\n'), ((13849, 13863), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (13861, 13863), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((15077, 15204), 'web_scrapers.add_new_ipo_data_to_csv', 'web_scrapers.add_new_ipo_data_to_csv', (['"""../data/processed/1997-04_2019_full_ipo_data.csv"""', 'year', 'start_month', 'end_month'], {}), "(\n '../data/processed/1997-04_2019_full_ipo_data.csv', year, start_month,\n end_month)\n", (15113, 15204), False, 'import web_scrapers\n'), ((485, 533), 'pandas.read_csv', 'pd.read_csv', (['filenames[i]'], {'encoding': '"""ISO-8859-1"""'}), "(filenames[i], encoding='ISO-8859-1')\n", (496, 533), True, 'import pandas as pd\n'), ((1602, 1621), 'datetime.timedelta', 'timedelta', ([], {'days': '(180)'}), '(days=180)\n', (1611, 1621), False, 'from datetime import timedelta\n'), ((8924, 8944), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (8934, 8944), False, 'import json\n'), ((9794, 9826), 'json.dump', 'json.dump', (['dictionary', 'json_file'], {}), '(dictionary, json_file)\n', (9803, 9826), False, 'import json\n'), ((838, 851), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (848, 851), False, 'import csv\n'), ((10509, 10521), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10518, 10521), False, 'import json\n'), ((1677, 1696), 'datetime.timedelta', 'timedelta', ([], {'days': '(366)'}), '(days=366)\n', (1686, 1696), False, 'from datetime import timedelta\n'), ((8972, 8992), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (8982, 8992), False, 'import json\n'), ((11934, 11954), 'numpy.array', 'np.array', (['row.values'], {}), '(row.values)\n', (11942, 11954), True, 'import numpy as np\n'), ((12499, 12519), 'numpy.array', 'np.array', (['row.values'], {}), '(row.values)\n', (12507, 12519), True, 'import numpy as np\n'), ((12993, 13013), 'numpy.array', 'np.array', (['row.values'], {}), '(row.values)\n', (13001, 13013), True, 'import numpy as np\n'), ((9039, 9059), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (9049, 9059), False, 'import json\n')]
|
from director.devel.plugin import GenericPlugin
from director.fieldcontainer import FieldContainer
from .lib import measurementpanel
from PythonQt import QtCore
class Plugin(GenericPlugin):
ID = 'measurement_tool'
NAME = 'MeasurementTool'
DEPENDENCIES = ['MainWindow']
def __init__(self, app, view):
super(Plugin, self).__init__(app, view)
def init(self, fields):
measurementPanel = measurementpanel.MeasurementPanel(self.app, self.view)
measurementDock = self.app.addWidgetToDock(
measurementPanel.widget,
QtCore.Qt.RightDockWidgetArea,
visible=False
)
# ---
return FieldContainer(
measurementToolPanel=measurementPanel,
measurementToolDock=measurementDock
)
|
[
"director.fieldcontainer.FieldContainer"
] |
[((625, 720), 'director.fieldcontainer.FieldContainer', 'FieldContainer', ([], {'measurementToolPanel': 'measurementPanel', 'measurementToolDock': 'measurementDock'}), '(measurementToolPanel=measurementPanel, measurementToolDock=\n measurementDock)\n', (639, 720), False, 'from director.fieldcontainer import FieldContainer\n')]
|
__author__ = '<NAME> - www.tonybeltramelli.com'
# scripted agents taken from PySC2, credits to DeepMind
# https://github.com/deepmind/pysc2/blob/master/pysc2/agents/scripted_agent.py
import numpy as np
import uuid
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_SCREEN_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_SCREEN_SELECTED = features.SCREEN_FEATURES.selected.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
class ScriptedAgent(base_agent.BaseAgent):
def step(self, obs):
super(ScriptedAgent, self).step(obs)
# we expand dims because keras wants 4 dims for convolutions
# observation = np.expand_dims(obs.observation["screen"][_SCREEN_PLAYER_RELATIVE], axis=3)
screens = [obs.observation["screen"][_SCREEN_PLAYER_RELATIVE],
obs.observation["screen"][_SCREEN_SELECTED]]
observation = np.stack(screens, axis=2)
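        # stack the two screen feature layers (player_relative, selected) into a single H x W x 2 observation array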
if self.game == "beacon":
if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
neutral_y, neutral_x = (player_relative == 3).nonzero()
if not neutral_y.any():
action = _NO_OP
params = []
else:
target = [int(neutral_x.mean()), int(neutral_y.mean())]
action = _MOVE_SCREEN
params = [[0], target]
else:
action = _SELECT_ARMY
params = [[0]]
elif self.game == "mineral":
if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
neutral_y, neutral_x = (player_relative == 3).nonzero()
player_y, player_x = (player_relative == 1).nonzero()
if not neutral_y.any() or not player_y.any():
action = _NO_OP
params = []
else:
action = _MOVE_SCREEN
index_x = np.argmin(neutral_x)
index_y = np.argmin(neutral_y)
index = index_x if neutral_x[index_x] + neutral_y[index_x] < neutral_x[index_y] + neutral_y[index_y] else index_y
target = [neutral_x[index], neutral_y[index]]
params = [[0], target]
else:
action = _SELECT_ARMY
params = [[0]]
elif self.game == "minerals":
if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
neutral_y, neutral_x = (player_relative == 3).nonzero()
player_y, player_x = (player_relative == 1).nonzero()
if not neutral_y.any() or not player_y.any():
action = _NO_OP
params = []
else:
player = [int(player_x.mean()), int(player_y.mean())]
closest, min_dist = None, None
for p in zip(neutral_x, neutral_y):
dist = np.linalg.norm(np.array(player) - np.array(p))
if not min_dist or dist < min_dist:
closest, min_dist = p, dist
action = _MOVE_SCREEN
params = [[0], closest]
else:
action = _SELECT_ARMY
params = [[0]]
elif self.game == "roaches":
if _ATTACK_SCREEN in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
roach_y, roach_x = (player_relative == _PLAYER_HOSTILE).nonzero()
if not roach_y.any():
action = _NO_OP
params = [_NOT_QUEUED]
else:
index = np.argmax(roach_y)
target = [roach_x[index], roach_y[index]]
action = _ATTACK_SCREEN
params = [_NOT_QUEUED, target]
elif _SELECT_ARMY in obs.observation["available_actions"]:
action = _SELECT_ARMY
params = [_SELECT_ALL]
else:
action = _NO_OP
params = [_NOT_QUEUED]
self.states.append(np.array([observation, obs.observation["available_actions"], action, params]))
if len(self.states) == 64:
new_file_name = str(uuid.uuid1())
np.save("dataset_{}/{}".format(self.game, new_file_name), np.array(self.states))
self.states = []
return actions.FunctionCall(action, params)
class AgentRoaches(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "roaches"
self.states = []
class AgentBeacon(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "beacon"
self.states = []
class AgentMineral(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "mineral"
self.states = []
class AgentMinerals(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "minerals"
self.states = []
|
[
"pysc2.lib.actions.FunctionCall",
"pysc2.agents.base_agent.BaseAgent.__init__",
"numpy.argmax",
"uuid.uuid1",
"numpy.stack",
"numpy.array",
"numpy.argmin"
] |
[((1168, 1193), 'numpy.stack', 'np.stack', (['screens'], {'axis': '(2)'}), '(screens, axis=2)\n', (1176, 1193), True, 'import numpy as np\n'), ((5072, 5108), 'pysc2.lib.actions.FunctionCall', 'actions.FunctionCall', (['action', 'params'], {}), '(action, params)\n', (5092, 5108), False, 'from pysc2.lib import actions\n'), ((5178, 5213), 'pysc2.agents.base_agent.BaseAgent.__init__', 'base_agent.BaseAgent.__init__', (['self'], {}), '(self)\n', (5207, 5213), False, 'from pysc2.agents import base_agent\n'), ((5337, 5372), 'pysc2.agents.base_agent.BaseAgent.__init__', 'base_agent.BaseAgent.__init__', (['self'], {}), '(self)\n', (5366, 5372), False, 'from pysc2.agents import base_agent\n'), ((5496, 5531), 'pysc2.agents.base_agent.BaseAgent.__init__', 'base_agent.BaseAgent.__init__', (['self'], {}), '(self)\n', (5525, 5531), False, 'from pysc2.agents import base_agent\n'), ((5657, 5692), 'pysc2.agents.base_agent.BaseAgent.__init__', 'base_agent.BaseAgent.__init__', (['self'], {}), '(self)\n', (5686, 5692), False, 'from pysc2.agents import base_agent\n'), ((4771, 4848), 'numpy.array', 'np.array', (["[observation, obs.observation['available_actions'], action, params]"], {}), "([observation, obs.observation['available_actions'], action, params])\n", (4779, 4848), True, 'import numpy as np\n'), ((4918, 4930), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (4928, 4930), False, 'import uuid\n'), ((5003, 5024), 'numpy.array', 'np.array', (['self.states'], {}), '(self.states)\n', (5011, 5024), True, 'import numpy as np\n'), ((2432, 2452), 'numpy.argmin', 'np.argmin', (['neutral_x'], {}), '(neutral_x)\n', (2441, 2452), True, 'import numpy as np\n'), ((2483, 2503), 'numpy.argmin', 'np.argmin', (['neutral_y'], {}), '(neutral_y)\n', (2492, 2503), True, 'import numpy as np\n'), ((4330, 4348), 'numpy.argmax', 'np.argmax', (['roach_y'], {}), '(roach_y)\n', (4339, 4348), True, 'import numpy as np\n'), ((3567, 3583), 'numpy.array', 'np.array', (['player'], {}), '(player)\n', (3575, 3583), True, 'import numpy as np\n'), ((3586, 3597), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (3594, 3597), True, 'import numpy as np\n')]
|
import time
import sys
import dask
from dask.distributed import (
wait,
futures_of,
Client,
)
from tpch import loaddata, queries
#from benchmarks import utils  # required by the timing_supported code paths below (prettyprint_timing_info)
# Paths or URLs to the TPC-H tables.
#table_paths = {
# 'CUSTOMER': 'hdfs://bu-23-115:9000/tpch/customer.tbl',
# 'LINEITEM': 'hdfs://bu-23-115:9000/tpch/lineitem.tbl',
# 'NATION': 'hdfs://bu-23-115:9000/tpch/nation.tbl',
# 'ORDERS': 'hdfs://bu-23-115:9000/tpch/orders.tbl',
# 'PART': 'hdfs://bu-23-115:9000/tpch/part.tbl',
# 'PARTSUPP': 'hdfs://bu-23-115:9000/tpch/partsupp.tbl',
# 'REGION': 'hdfs://bu-23-115:9000/tpch/region.tbl',
# 'SUPPLIER': 'hdfs://bu-23-115:9000/tpch/supplier.tbl',
#}
table_paths = {
'CUSTOMER': '/root/2g/customer.tbl',
'LINEITEM': '/root/2g/lineitem.tbl',
'NATION': '/root/2g/nation.tbl',
'ORDERS': '/root/2g/orders.tbl',
'PART': '/root/2g/part.tbl',
'PARTSUPP': '/root/2g/partsupp.tbl',
'REGION': '/root/2g/region.tbl',
'SUPPLIER': '/root/2g/supplier.tbl',
}
#table_paths = {
# 'CUSTOMER': 'https://gochaudhstorage001.blob.core.windows.net/tpch/customer.tbl',
# 'LINEITEM': 'https://gochaudhstorage001.blob.core.windows.net/tpch/lineitem.tbl',
# 'NATION': 'https://gochaudhstorage001.blob.core.windows.net/tpch/nation.tbl',
# 'ORDERS': 'https://gochaudhstorage001.blob.core.windows.net/tpch/orders.tbl',
# 'PART': 'https://gochaudhstorage001.blob.core.windows.net/tpch/part.tbl',
# 'PARTSUPP': 'https://gochaudhstorage001.blob.core.windows.net/tpch/partsupp.tbl',
# 'REGION': 'https://gochaudhstorage001.blob.core.windows.net/tpch/region.tbl',
# 'SUPPLIER': 'https://gochaudhstorage001.blob.core.windows.net/tpch/supplier.tbl',
#}
def main():
if len(sys.argv) < 2:
print("args: <dask client>")
return 1
client = Client(sys.argv[1])
timing_supported = False
# set to False if running against upstream dask without our code changes.
benchmarker = TpchBenchmarkManager(client, timing_supported)
benchmarker.load_tables(
part_path = table_paths['PART'],
supplier_path = table_paths['SUPPLIER'],
partsupp_path = table_paths['PARTSUPP'],
customer_path = table_paths['CUSTOMER'],
orders_path = table_paths['ORDERS'],
lineitem_path = table_paths['LINEITEM'],
nation_path = table_paths['NATION'],
region_path = table_paths['REGION'],
)
# Choose what queries you want to run here.
benchmarker.run_query(1)
#benchmarker.run_query(3)
#benchmarker.run_query(6)
#benchmarker.run_query(21)
class TpchBenchmarkManager:
def __init__(self, client, timing_supported=True):
self.client = client
self.timing_supported = timing_supported
self.tables = {}
def load_tables(self,
*,
# Paths/URLs for TPCH tables source data.
part_path=None,
supplier_path=None,
partsupp_path=None,
customer_path=None,
orders_path=None,
lineitem_path=None,
nation_path=None,
region_path=None,
):
paths = {
'PART': part_path,
'SUPPLIER': supplier_path,
'PARTSUPP': partsupp_path,
'CUSTOMER': customer_path,
'ORDERS': orders_path,
'LINEITEM': lineitem_path,
'NATION': nation_path,
'REGION': region_path,
}
for tablename, path in paths.items():
if path is None:
print("\nNo path given for table {}. Skipping.".format(tablename))
continue
print("\n====================================")
print("Ingesting table {}... \n(from {})".format(tablename, path))
load_start = time.time()
table = loaddata.loader[tablename](path)
#table = self.client.persist(table)
#wait(table)
load_duration = time.time() - load_start
self.tables[tablename] = table
futures = futures_of(table)
print("...complete.")
print("\nE2E time: {:.3f} seconds. Number of partitions: {}".format(
load_duration, len(futures)))
print("====================================\n")
if self.timing_supported:
longest_future = None
longest_future_duration = None
for future in futures:
duration = self.client.timing_info(future)[0]['duration']
if longest_future is None or duration > longest_future_duration:
longest_future = future
longest_future_duration = duration
print("Profile of slowest partition:")
#utils.prettyprint_timing_info(self.client.timing_info(longest_future))
def run_query(self, query_num):
print("\n====================================")
print("Executing query {}...".format(query_num))
query_start = time.time()
futures = queries.by_number[query_num](self.tables)
future = self.client.compute(futures)
result = self.client.gather(future)
query_duration = time.time() - query_start
print("...complete.")
print("\nE2E time: {:.3f} seconds.".format(query_duration))
if self.timing_supported:
try:
utils.prettyprint_timing_info(self.client.timing_info(future))
except Exception as e:
print(str(e))
print(result)
return future
if __name__ == '__main__':
main()
|
[
"dask.distributed.Client",
"dask.distributed.futures_of",
"time.time"
] |
[((1825, 1844), 'dask.distributed.Client', 'Client', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1831, 1844), False, 'from dask.distributed import wait, futures_of, Client\n'), ((5009, 5020), 'time.time', 'time.time', ([], {}), '()\n', (5018, 5020), False, 'import time\n'), ((3765, 3776), 'time.time', 'time.time', ([], {}), '()\n', (3774, 3776), False, 'import time\n'), ((4021, 4038), 'dask.distributed.futures_of', 'futures_of', (['table'], {}), '(table)\n', (4031, 4038), False, 'from dask.distributed import wait, futures_of, Client\n'), ((5196, 5207), 'time.time', 'time.time', ([], {}), '()\n', (5205, 5207), False, 'import time\n'), ((3931, 3942), 'time.time', 'time.time', ([], {}), '()\n', (3940, 3942), False, 'import time\n')]
|
"""Use pika with the Tornado IOLoop
"""
import logging
from tornado import ioloop
from pika.adapters.utils import nbio_interface, selector_ioloop_adapter
from pika.adapters import base_connection
LOGGER = logging.getLogger(__name__)
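# Minimal usage sketch (assumes a running Tornado IOLoop and a local RabbitMQ broker):
#   import pika
#   conn = TornadoConnection(pika.ConnectionParameters('localhost'),
#                             on_open_callback=lambda connection: LOGGER.info('connection open'))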
class TornadoConnection(base_connection.BaseConnection):
"""The TornadoConnection runs on the Tornado IOLoop.
"""
def __init__(self,
parameters=None,
on_open_callback=None,
on_open_error_callback=None,
on_close_callback=None,
custom_ioloop=None,
internal_connection_workflow=True):
"""Create a new instance of the TornadoConnection class, connecting
to RabbitMQ automatically
:param pika.connection.Parameters parameters: Connection parameters
:param on_open_callback: The method to call when the connection is open
:type on_open_callback: method
:param None | method on_open_error_callback: Called if the connection
can't be established or connection establishment is interrupted by
`Connection.close()`: on_open_error_callback(Connection, exception).
:param None | method on_close_callback: Called when a previously fully
open connection is closed:
`on_close_callback(Connection, exception)`, where `exception` is
either an instance of `exceptions.ConnectionClosed` if closed by
user or broker or exception of another type that describes the cause
of connection failure.
:param None | ioloop.IOLoop |
nbio_interface.AbstractIOServices custom_ioloop:
Override using the global IOLoop in Tornado
:param bool internal_connection_workflow: True for autonomous connection
establishment which is default; False for externally-managed
connection workflow via the `create_connection()` factory.
"""
if isinstance(custom_ioloop, nbio_interface.AbstractIOServices):
nbio = custom_ioloop
else:
nbio = (
selector_ioloop_adapter.SelectorIOServicesAdapter(
custom_ioloop or ioloop.IOLoop.instance()))
super(TornadoConnection, self).__init__(
parameters,
on_open_callback,
on_open_error_callback,
on_close_callback,
nbio,
internal_connection_workflow=internal_connection_workflow)
@classmethod
def create_connection(cls,
connection_configs,
on_done,
custom_ioloop=None,
workflow=None):
"""Implement
:py:classmethod:`pika.adapters.BaseConnection.create_connection()`.
"""
nbio = selector_ioloop_adapter.SelectorIOServicesAdapter(
custom_ioloop or ioloop.IOLoop.instance())
def connection_factory(params):
"""Connection factory."""
if params is None:
raise ValueError('Expected pika.connection.Parameters '
'instance, but got None in params arg.')
return cls(
parameters=params,
custom_ioloop=nbio,
internal_connection_workflow=False)
return cls._start_connection_workflow(
connection_configs=connection_configs,
connection_factory=connection_factory,
nbio=nbio,
workflow=workflow,
on_done=on_done)
|
[
"logging.getLogger",
"tornado.ioloop.IOLoop.instance"
] |
[((210, 237), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (227, 237), False, 'import logging\n'), ((2920, 2944), 'tornado.ioloop.IOLoop.instance', 'ioloop.IOLoop.instance', ([], {}), '()\n', (2942, 2944), False, 'from tornado import ioloop\n'), ((2211, 2235), 'tornado.ioloop.IOLoop.instance', 'ioloop.IOLoop.instance', ([], {}), '()\n', (2233, 2235), False, 'from tornado import ioloop\n')]
|
from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_volume_simple_activate
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_id = '42'
fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)
class TestCephVolumeSimpleActivateModule(object):
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
def test_with_check_mode(self, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'_ansible_check_mode': True
})
m_exit_json.side_effect = ca_test_common.exit_json
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == 0
assert not result['stdout']
assert not result['stderr']
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_with_failure(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = 'error'
rc = 2
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_all_osds(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_all': True
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=True)
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=False)
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_activate_path_not_exists(self, m_fail_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['msg'] == '{} does not exist'.format(fake_path)
assert result['rc'] == 1
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_without_systemd(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'systemd': False
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
@patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_with_container(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == [fake_container_binary,
'run', '--rm', '--privileged',
'--ipc=host', '--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'-v', '/run/lvm/:/run/lvm/',
'-v', '/run/lock/lvm/:/run/lock/lvm/',
'--entrypoint=ceph-volume', fake_container_image,
'--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
|
[
"mock.mock.patch",
"ca_test_common.set_module_args",
"mock.mock.patch.dict",
"ceph_volume_simple_activate.main",
"pytest.raises",
"mock.mock.patch.object"
] |
[((412, 471), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.exit_json"""'], {}), "('ansible.module_utils.basic.AnsibleModule.exit_json')\n", (417, 471), False, 'from mock.mock import patch\n'), ((1160, 1219), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.exit_json"""'], {}), "('ansible.module_utils.basic.AnsibleModule.exit_json')\n", (1165, 1219), False, 'from mock.mock import patch\n'), ((1225, 1286), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.run_command"""'], {}), "('ansible.module_utils.basic.AnsibleModule.run_command')\n", (1230, 1286), False, 'from mock.mock import patch\n'), ((2029, 2088), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.exit_json"""'], {}), "('ansible.module_utils.basic.AnsibleModule.exit_json')\n", (2034, 2088), False, 'from mock.mock import patch\n'), ((2094, 2155), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.run_command"""'], {}), "('ansible.module_utils.basic.AnsibleModule.run_command')\n", (2099, 2155), False, 'from mock.mock import patch\n'), ((2892, 2942), 'mock.mock.patch.object', 'patch.object', (['os.path', '"""exists"""'], {'return_value': '(True)'}), "(os.path, 'exists', return_value=True)\n", (2904, 2942), False, 'from mock.mock import patch\n'), ((2948, 3007), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.exit_json"""'], {}), "('ansible.module_utils.basic.AnsibleModule.exit_json')\n", (2953, 3007), False, 'from mock.mock import patch\n'), ((3013, 3074), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.run_command"""'], {}), "('ansible.module_utils.basic.AnsibleModule.run_command')\n", (3018, 3074), False, 'from mock.mock import patch\n'), ((3839, 3890), 'mock.mock.patch.object', 'patch.object', (['os.path', '"""exists"""'], {'return_value': '(False)'}), "(os.path, 'exists', return_value=False)\n", (3851, 3890), False, 'from mock.mock import patch\n'), ((3896, 3955), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.fail_json"""'], {}), "('ansible.module_utils.basic.AnsibleModule.fail_json')\n", (3901, 3955), False, 'from mock.mock import patch\n'), ((4432, 4491), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.exit_json"""'], {}), "('ansible.module_utils.basic.AnsibleModule.exit_json')\n", (4437, 4491), False, 'from mock.mock import patch\n'), ((4497, 4558), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.run_command"""'], {}), "('ansible.module_utils.basic.AnsibleModule.run_command')\n", (4502, 4558), False, 'from mock.mock import patch\n'), ((5396, 5468), 'mock.mock.patch.dict', 'patch.dict', (['os.environ', "{'CEPH_CONTAINER_BINARY': fake_container_binary}"], {}), "(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})\n", (5406, 5468), False, 'from mock.mock import patch\n'), ((5474, 5544), 'mock.mock.patch.dict', 'patch.dict', (['os.environ', "{'CEPH_CONTAINER_IMAGE': fake_container_image}"], {}), "(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})\n", (5484, 5544), False, 'from mock.mock import patch\n'), ((5550, 5609), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.exit_json"""'], {}), "('ansible.module_utils.basic.AnsibleModule.exit_json')\n", (5555, 5609), False, 'from mock.mock import patch\n'), ((5615, 5676), 'mock.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.run_command"""'], {}), 
"('ansible.module_utils.basic.AnsibleModule.run_command')\n", (5620, 5676), False, 'from mock.mock import patch\n'), ((529, 636), 'ca_test_common.set_module_args', 'ca_test_common.set_module_args', (["{'osd_id': fake_id, 'osd_fsid': fake_uuid, '_ansible_check_mode': True}"], {}), "({'osd_id': fake_id, 'osd_fsid': fake_uuid,\n '_ansible_check_mode': True})\n", (559, 636), False, 'import ca_test_common\n'), ((1356, 1430), 'ca_test_common.set_module_args', 'ca_test_common.set_module_args', (["{'osd_id': fake_id, 'osd_fsid': fake_uuid}"], {}), "({'osd_id': fake_id, 'osd_fsid': fake_uuid})\n", (1386, 1430), False, 'import ca_test_common\n'), ((2230, 2279), 'ca_test_common.set_module_args', 'ca_test_common.set_module_args', (["{'osd_all': True}"], {}), "({'osd_all': True})\n", (2260, 2279), False, 'import ca_test_common\n'), ((3163, 3214), 'ca_test_common.set_module_args', 'ca_test_common.set_module_args', (["{'path': fake_path}"], {}), "({'path': fake_path})\n", (3193, 3214), False, 'import ca_test_common\n'), ((4033, 4084), 'ca_test_common.set_module_args', 'ca_test_common.set_module_args', (["{'path': fake_path}"], {}), "({'path': fake_path})\n", (4063, 4084), False, 'import ca_test_common\n'), ((4640, 4736), 'ca_test_common.set_module_args', 'ca_test_common.set_module_args', (["{'osd_id': fake_id, 'osd_fsid': fake_uuid, 'systemd': False}"], {}), "({'osd_id': fake_id, 'osd_fsid': fake_uuid,\n 'systemd': False})\n", (4670, 4736), False, 'import ca_test_common\n'), ((5757, 5831), 'ca_test_common.set_module_args', 'ca_test_common.set_module_args', (["{'osd_id': fake_id, 'osd_fsid': fake_uuid}"], {}), "({'osd_id': fake_id, 'osd_fsid': fake_uuid})\n", (5787, 5831), False, 'import ca_test_common\n'), ((752, 797), 'pytest.raises', 'pytest.raises', (['ca_test_common.AnsibleExitJson'], {}), '(ca_test_common.AnsibleExitJson)\n', (765, 797), False, 'import pytest\n'), ((821, 855), 'ceph_volume_simple_activate.main', 'ceph_volume_simple_activate.main', ([], {}), '()\n', (853, 855), False, 'import ceph_volume_simple_activate\n'), ((1654, 1699), 'pytest.raises', 'pytest.raises', (['ca_test_common.AnsibleExitJson'], {}), '(ca_test_common.AnsibleExitJson)\n', (1667, 1699), False, 'import pytest\n'), ((1723, 1757), 'ceph_volume_simple_activate.main', 'ceph_volume_simple_activate.main', ([], {}), '()\n', (1755, 1757), False, 'import ceph_volume_simple_activate\n'), ((2486, 2531), 'pytest.raises', 'pytest.raises', (['ca_test_common.AnsibleExitJson'], {}), '(ca_test_common.AnsibleExitJson)\n', (2499, 2531), False, 'import pytest\n'), ((2555, 2589), 'ceph_volume_simple_activate.main', 'ceph_volume_simple_activate.main', ([], {}), '()\n', (2587, 2589), False, 'import ceph_volume_simple_activate\n'), ((3421, 3466), 'pytest.raises', 'pytest.raises', (['ca_test_common.AnsibleExitJson'], {}), '(ca_test_common.AnsibleExitJson)\n', (3434, 3466), False, 'import pytest\n'), ((3490, 3524), 'ceph_volume_simple_activate.main', 'ceph_volume_simple_activate.main', ([], {}), '()\n', (3522, 3524), False, 'import ceph_volume_simple_activate\n'), ((4180, 4225), 'pytest.raises', 'pytest.raises', (['ca_test_common.AnsibleFailJson'], {}), '(ca_test_common.AnsibleFailJson)\n', (4193, 4225), False, 'import pytest\n'), ((4249, 4283), 'ceph_volume_simple_activate.main', 'ceph_volume_simple_activate.main', ([], {}), '()\n', (4281, 4283), False, 'import ceph_volume_simple_activate\n'), ((4963, 5008), 'pytest.raises', 'pytest.raises', (['ca_test_common.AnsibleExitJson'], {}), '(ca_test_common.AnsibleExitJson)\n', (4976, 5008), False, 
'import pytest\n'), ((5032, 5066), 'ceph_volume_simple_activate.main', 'ceph_volume_simple_activate.main', ([], {}), '()\n', (5064, 5066), False, 'import ceph_volume_simple_activate\n'), ((6051, 6096), 'pytest.raises', 'pytest.raises', (['ca_test_common.AnsibleExitJson'], {}), '(ca_test_common.AnsibleExitJson)\n', (6064, 6096), False, 'import pytest\n'), ((6120, 6154), 'ceph_volume_simple_activate.main', 'ceph_volume_simple_activate.main', ([], {}), '()\n', (6152, 6154), False, 'import ceph_volume_simple_activate\n')]
|
import setuptools
setuptools.setup(
name='mintermonitoring',
version='1.0.0',
packages=setuptools.find_packages(include=['mintermonitoring'])
)
|
[
"setuptools.find_packages"
] |
[((97, 151), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'include': "['mintermonitoring']"}), "(include=['mintermonitoring'])\n", (121, 151), False, 'import setuptools\n')]
|
import pandas as pd
import os.path
length_switch = True
max_body_length = 50
process_candidates = os.path.exists('./datasets/candidates.output')
x_train = open('./datasets/x_train').readlines()
x_train = [x.rstrip('\n') for x in x_train]
y_train = open('./datasets/y_train').readlines()
y_train = [x.rstrip('\n') for x in y_train]
x_valid = open('./datasets/x_valid').readlines()
x_valid = [x.rstrip('\n') for x in x_valid]
y_valid = open('./datasets/y_valid').readlines()
y_valid = [x.rstrip('\n') for x in y_valid]
bytecodes = open('./datasets/bytecode.output').readlines()
bytecodes = [x.rstrip('\n') for x in bytecodes]
references = open('./datasets/references.output').readlines()
references = [x.rstrip('\n') for x in references]
if (process_candidates):
candidates = open('./datasets/candidates.output').readlines()
candidates = [x.rstrip('\n') for x in candidates]
df_pairs = pd.DataFrame({'source': bytecodes, 'target' : references, 'candidates': candidates })
else:
df_pairs = pd.DataFrame({'source': bytecodes, 'target': references })
if (length_switch):
mask = df_pairs['source'].apply(lambda x: len(x.split()) <= max_body_length)
df_pairs = df_pairs.loc[mask]
df_train = pd.DataFrame({'source': x_train + x_valid, 'target' : y_train + y_valid })
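# Left anti-join: keep only pairs whose 'source' does not appear in the train/valid set
# (merge with an indicator column, then retain rows found only in the left frame).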
df_valid = df_pairs.merge(df_train, on='source', indicator=True, how='left')\
.query('_merge=="left_only"')\
.drop('_merge', axis=1)\
.drop('target_y', axis=1)
# df_valid = df_valid.sample(frac=1).reset_index(drop=True).sample(50000)
with open('./datasets/remaining_sources', 'w') as filehandle:
filehandle.writelines("%s\n" % place for place in df_valid['source'])
with open('./datasets/remaining_references', 'w') as filehandle:
filehandle.writelines("%s\n" % place for place in df_valid['target_x'])
if (process_candidates):
with open('./datasets/remaining_candidates', 'w') as filehandle:
filehandle.writelines("%s\n" % place for place in df_valid['candidates'])
|
[
"pandas.DataFrame"
] |
[((1215, 1287), 'pandas.DataFrame', 'pd.DataFrame', (["{'source': x_train + x_valid, 'target': y_train + y_valid}"], {}), "({'source': x_train + x_valid, 'target': y_train + y_valid})\n", (1227, 1287), True, 'import pandas as pd\n'), ((901, 988), 'pandas.DataFrame', 'pd.DataFrame', (["{'source': bytecodes, 'target': references, 'candidates': candidates}"], {}), "({'source': bytecodes, 'target': references, 'candidates':\n candidates})\n", (913, 988), True, 'import pandas as pd\n'), ((1008, 1065), 'pandas.DataFrame', 'pd.DataFrame', (["{'source': bytecodes, 'target': references}"], {}), "({'source': bytecodes, 'target': references})\n", (1020, 1065), True, 'import pandas as pd\n')]
|
import System
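# IN and OUT below are the implicit input/output bindings of a Dynamo Python Script node (assumed execution context).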
dataKey, _ = IN
OUT = System.AppDomain.CurrentDomain.GetData("_Dyn_Wireless_%s" % dataKey)
|
[
"System.AppDomain.CurrentDomain.GetData"
] |
[((37, 105), 'System.AppDomain.CurrentDomain.GetData', 'System.AppDomain.CurrentDomain.GetData', (["('_Dyn_Wireless_%s' % dataKey)"], {}), "('_Dyn_Wireless_%s' % dataKey)\n", (75, 105), False, 'import System\n')]
|
from quiet_coms import find_quiet_ports
from quiet import Quiet
import time
if 'EXIT_ON_FAIL' not in locals():
VERBOSE = True
EXIT_ON_FAIL = True
class QuietI2C(Quiet):
def __init__(self, coms, **kargs) -> None:
Quiet.__init__(self, coms, **kargs)
def raw_write(self, addr: int, data: bytearray):
command = f'IIC:ADDR {addr};WRIT'
self.writeIEEE(command, data)
def raw_read(self, addr: int, readCount: int) -> bytearray:
message = f'IIC:ADDR {addr};READ? {readCount}'
return self.queryIEEE(message)
def register_write(self, address: int, register:int, data: int, dataSize=2):
self.write(f'IIC:ADDR {address};REGI:ADDR {register};RSIZ {dataSize};WRIT {data}')
def register_read(self, address: int, register:int, dataSize=1) -> int:
return self.query_int(f'IIC:ADDR {address};REGI:ADDR {register};RSIZ {dataSize};READ?')
def enable_master_mode(self) -> None:
self.set_and_verify('IIC:MODE', 'MAST')
def disable(self) -> None:
self.set_and_verify('IIC:MODE', 'OFF')
def acknowledged(self) -> bool:
ack = self.query('IIC:ACK?')
return '1' in ack
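# Example session (sketch; assumes find_quiet_ports() yields a port object usable by the Quiet constructor):
#   i2c = QuietI2C(find_quiet_ports()[0])
#   i2c.enable_master_mode()
#   device_id = i2c.register_read(0x50, 0xFE, dataSize=2)   # same register probed in the tests below
#   i2c.disable()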
def _i2c_register_read_test(i: QuietI2C, address:int, register:int, expectation:int):
value = i.register_read(address, register, 2)
if value != expectation:
message = f'Failure at {hex(address)}:{hex(register)}. Expected {hex(expectation)} but read {hex(value)}'
if EXIT_ON_FAIL:
            raise Exception(message)
else:
print(message)
elif VERBOSE:
print(f'REG\t{hex(address)}:{hex(register)} -> {hex(value)}')
def _i2c_register_write_test(i: QuietI2C, address:int, register:int, value:int):
i.register_write(address, register, value, 2)
time.sleep(0.1)
_i2c_register_read_test(i, address, register, value)
def _i2c_raw_write(i: QuietI2C, address:int, data:bytearray):
i.raw_write(address, data)
if VERBOSE:
print(f'RAW\t{hex(address)} -> {str(data)[10:].strip(")")}')
def _i2c_raw_read_test(i: QuietI2C, address:int, expected:bytearray):
response = i.raw_read(address, len(expected))
if response != expected:
message = f'Failure at {hex(address)}. Expected {expected} but read {response}'
if EXIT_ON_FAIL:
raise Exception(message)
else:
print(message)
elif VERBOSE:
print(f'RAW\t{hex(address)} <- {response}')
def _i2c_check_error(i: QuietI2C, error_name: str, expectation: int):
error = i.error()
if error != expectation:
message = f'Failure {error_name}. Expected {hex(expectation)} received {hex(error)}'
if EXIT_ON_FAIL:
raise Exception(message)
else:
print(message)
elif VERBOSE:
print(f'{error_name.ljust(32)} Pass')
def _i2c_check_lower_limit(i: QuietI2C, command:str, low:int, error_name:str, error_code, delay:int=0):
under = low - 1
i.write(f'{command} {under}')
if delay > 0:
time.sleep(delay)
_i2c_check_error(i, f'UNDER {error_name}', error_code if under >= 0 else 0x0110)
i.write(f'{command} {low}')
if delay > 0:
time.sleep(delay)
_i2c_check_error(i, f'LOWER {error_name}', 0x00)
def _i2c_check_upper_limit(i: QuietI2C, command:str, high:int, error_name:str, error_code, delay:int=0):
i.write(f'{command} {high}')
if delay > 0:
time.sleep(delay)
_i2c_check_error(i, f'UPPER {error_name}', 0x00)
i.write(f'{command} {high + 1}')
if delay > 0:
time.sleep(delay)
_i2c_check_error(i, f'OVER {error_name}', error_code)
def _i2c_check_limit(i: QuietI2C, command:str, low:int, high:int, error_name:str, error_code):
_i2c_check_lower_limit(i, command, low, error_name, error_code)
_i2c_check_upper_limit(i, command, high, error_name, error_code)
def _i2c_check_acknowledge(i, expectation:bool):
ack = i.acknowledged()
if ack != expectation:
if ack:
message = f'Failure ACKNOWLEDGED. Expected NO_ACKNOWLEDGED received ACKNOWLEDGED'
else:
message = f'Failure ACKNOWLEDGED. Expected ACKNOWLEDGED received NO_ACKNOWLEDGED'
if EXIT_ON_FAIL:
raise Exception(message)
else:
print(message)
elif VERBOSE:
print(f'{("" if ack else "NO_")}ACKNOWLEDGED'.ljust(32) + ' Pass')
def i2c_test_errors(i: QuietI2C) -> bool:
# Clear Errors
i.error()
# Verify the second hook works
if i.query_int('SYST:ERR?') != 0:
        message = 'Failure "SYST:ERR?" Command'
        if EXIT_ON_FAIL:
            raise Exception(message)
        else:
            print(message)
elif VERBOSE:
print('IIC:REGI:ERRO? Pass')
i.disable()
_i2c_check_error(i, 'ERROR_NONE', 0x00)
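    # The hex constants below appear to be device-specific error codes (invalid baud, timeout,
    # address, register size, disabled-bus reads/writes, and so on); each limit check drives the
    # setting just outside its allowed range and expects the matching code to be reported.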
_i2c_check_limit(i, 'IIC:BAUD', 16000, 1000000, 'INVALID_BAUD', 0x0B01)
_i2c_check_limit(i, 'IIC:TIME', 1, 255, 'INVALID_TIMEOUT', 0x0B02)
_i2c_check_limit(i, 'IIC:ADDR', 0, 127, 'INVALID_ADDRESS', 0x0B03)
i.write('IIC:MODE MAS')
_i2c_check_error(i, 'ERROR_INVALID_MODE', 0x0B04)
_i2c_check_limit(i, 'IIC:REGI:RSIZ', 1, 2, 'INVALID_RSIZE', 0x0B20)
_i2c_check_limit(i, 'IIC:REGI:ADDR', 0, 255, 'INVALID_REGISTER_ADDRESS', 0x0B21)
i.write('IIC:REGI:WRIT 1')
_i2c_check_error(i, 'ERROR_DISABLED_WRITE', 0x0B10)
i.query('IIC:REGI:READ?')
i.com.flushInput()
_i2c_check_error(i, 'ERROR_DISABLED_READ', 0x0B11)
i.write('IIC:WRIT #11A')
_i2c_check_error(i, 'ERROR_DISABLED_WRITE', 0x0B10)
i.query('IIC:READ? 2')
_i2c_check_error(i, 'ERROR_DISABLED_READ', 0x0B11)
i.reset()
i.enable_master_mode()
try:
i.write('IIC:ADDR 0x50;REGI:ADDR 0xFF;RSIZ 1')
i.com.flushInput()
_i2c_check_upper_limit(i, 'IIC:REGI:WRIT', 255, 'INVALID_REGISTER_VALUE', 0x0B22, 0.1)
i.write('IIC:WRIT #10')
i.com.flushInput()
time.sleep(0.1)
_i2c_check_error(i, 'I2C_ERROR_INVALID_WRITE_SIZE', 0x0B31)
i.write('IIC:READ? 0')
i.com.flushInput()
time.sleep(0.1)
_i2c_check_error(i, 'I2C_ERROR_INVALID_READ_SIZE', 0x0B32)
i.write('IIC:WRIT #296' + '0123456789ABCDEF' * 6)
i.com.flushInput()
time.sleep(0.1)
_i2c_check_error(i, 'I2C_ERROR_INVALID_WRITE_SIZE', 0x0B31)
i.query('IIC:READ? 96')
i.com.flushInput()
time.sleep(0.1)
_i2c_check_error(i, 'I2C_ERROR_INVALID_READ_SIZE', 0x0B32)
i.write('IIC:READ?')
i.com.flushInput()
_i2c_check_error(i, 'I2C_ERROR_INVALID_READ_SYNTAX', 0x0B33)
i.write('IIC:ADDR 0x10;WRIT #13ABC')
time.sleep(0.1)
_i2c_check_acknowledge(i, False)
finally:
i.disable()
def i2c_test(i: QuietI2C) -> bool:
i.reset()
i.enable_master_mode()
try:
_i2c_register_read_test(i, 0x50, 0xFE, 0x5449)
_i2c_register_read_test(i, 0x50, 0xFF, 0x1004)
_i2c_register_write_test(i, 0x50, 0x0C, 0x05AA)
_i2c_register_write_test(i, 0x50, 0x08, 0x1E00)
_i2c_register_write_test(i, 0x50, 0x0A, 0x5F80)
_i2c_raw_write(i, 0x50, bytearray([0xFF]))
_i2c_raw_read_test(i, 0x50, bytearray([0x10, 0x04]))
_i2c_raw_write(i, 0x50, bytearray([0x0C, 0x05, 0xA0]))
_i2c_raw_write(i, 0x50, bytearray([0x0C]))
_i2c_raw_read_test(i, 0x50, bytearray([0x05, 0xA0]))
finally:
i.disable()
if __name__ == "__main__":
q2c = QuietI2C(None, log_path='usb_log.txt')
i2c_test(q2c)
i2c_test_errors(q2c)
i2c_test(q2c)
print('All I2C Tests Passed')
|
[
"quiet.Quiet.__init__",
"time.sleep"
] |
[((1801, 1816), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1811, 1816), False, 'import time\n'), ((234, 269), 'quiet.Quiet.__init__', 'Quiet.__init__', (['self', 'coms'], {}), '(self, coms, **kargs)\n', (248, 269), False, 'from quiet import Quiet\n'), ((3044, 3061), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (3054, 3061), False, 'import time\n'), ((3207, 3224), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (3217, 3224), False, 'import time\n'), ((3445, 3462), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (3455, 3462), False, 'import time\n'), ((3581, 3598), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (3591, 3598), False, 'import time\n'), ((6039, 6054), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6049, 6054), False, 'import time\n'), ((6190, 6205), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6200, 6205), False, 'import time\n'), ((6367, 6382), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6377, 6382), False, 'import time\n'), ((6519, 6534), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6529, 6534), False, 'import time\n'), ((6782, 6797), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6792, 6797), False, 'import time\n')]
|
# Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Tuner class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
from collections import defaultdict
import numpy as np
import time
import random
import hashlib
import tensorflow as tf
from tensorflow import keras
from ..abstractions import display
class TunerStats(object):
"""Track tuner statistics."""
def __init__(self):
self.num_generated_models = 0 # overall number of instances generated
self.num_invalid_models = 0 # how many models didn't work
self.num_oversized_models = 0 # num models with params> max_params
def summary(self, extended=False):
display.subsection('Tuning stats')
display.display_settings(self.get_config())
def get_config(self):
return {
'num_generated_models': self.num_generated_models,
'num_invalid_models': self.num_invalid_models,
'num_oversized_models': self.num_oversized_models
}
@classmethod
def from_config(cls, config):
stats = cls()
stats.num_generated_models = config['num_generated_models']
stats.num_invalid_models = config['num_invalid_models']
stats.num_oversized_models = config['num_oversized_models']
return stats
def get_max_epochs_and_steps(fit_args, fit_kwargs):
if fit_args:
x = tf.nest.flatten(fit_args)[0]
else:
x = tf.nest.flatten(fit_kwargs.get('x'))[0]
batch_size = fit_kwargs.get('batch_size', 32)
if hasattr(x, '__len__'):
max_steps = math.ceil(float(len(x)) / batch_size)
else:
max_steps = fit_kwargs.get('steps')
max_epochs = fit_kwargs.get('epochs', 1)
return max_epochs, max_steps
class TunerCallback(keras.callbacks.Callback):
def __init__(self, tuner, trial, execution):
self.tuner = tuner
self.trial = trial
self.execution = execution
def on_epoch_begin(self, epoch, logs=None):
self.tuner.on_epoch_begin(
self.execution, self.model, epoch, logs=logs)
def on_batch_begin(self, batch, logs=None):
self.tuner.on_batch_begin(self.execution, self.model, batch, logs)
def on_batch_end(self, batch, logs=None):
self.tuner.on_batch_end(self.execution, self.model, batch, logs)
def on_epoch_end(self, epoch, logs=None):
self.tuner.on_epoch_end(
self.execution, self.model, epoch, logs=logs)
class Display(object):
def __init__(self, host):
self.host = host
self.cpu_usage = []
self.gpu_usage = []
self.batch_history = defaultdict(list)
self.epoch_pbar = None
def on_execution_begin(self, trial, execution, model):
# new model summary
if len(trial.executions) == 1:
display.section('New model')
trial.summary()
# execution info if needed
if trial.max_executions > 1:
display.subsection('Execution %d/%d' %
(len(trial.executions),
trial.max_executions))
def on_trial_end(self,
averaged_metrics,
best_metrics,
objective,
remaining_trials,
max_trials):
# train summary
current = averaged_metrics
best = best_metrics
rows = [['Name', 'Best model', 'Current model']]
for name in best.names:
best_value = round(best.get_best_value(name), 4)
current_value = round(current.get_best_value(name), 4)
row = [name, best_value, current_value]
if name == objective:
if best_value == current_value:
row = display.colorize_row(row, 'green')
else:
row = display.colorize_row(row, 'red')
rows.append(row)
display.display_table(rows)
# Tuning budget exhausted
if remaining_trials < 1:
display.highlight('Hypertuning complete - results in %s' %
self.host.results_dir)
# TODO: final summary
else:
display.highlight('%d/%d trials left' %
(remaining_trials, max_trials))
def on_epoch_begin(self, execution, model, epoch, logs=None):
# reset counters
self.epoch_history = defaultdict(list)
self.gpu_usage = []
self.cpu_usage = []
# epoch bar
self.epoch_pbar = display.progress_bar(
total=execution.max_steps,
leave=True,
unit='steps')
def on_epoch_end(self, execution, model, epoch, logs=None):
# compute stats
final_epoch_postfix = {}
for m, v in logs.items():
final_epoch_postfix[m] = round(v, 4)
# epoch bar
self.epoch_pbar.set_postfix(final_epoch_postfix)
self.epoch_pbar.close()
def on_batch_end(self, execution, model, batch, logs=None):
logs = logs or {}
self.epoch_pbar.update(1)
# computing metric statistics
for k, v in logs.items():
self.batch_history[k].append(v)
avg_metrics = self._avg_metrics(self.batch_history)
self.epoch_pbar.set_postfix(avg_metrics)
# create bar desc with updated statistics
description = ''
host_status = self.host.get_status()
if len(host_status['gpu']):
gpu_usage = [float(gpu['usage']) for gpu in host_status['gpu']]
gpu_usage = int(np.average(gpu_usage))
self.gpu_usage.append(gpu_usage)
description += '[GPU:%3s%%]' % int(np.average(self.gpu_usage))
self.cpu_usage.append(int(host_status['cpu']['usage']))
description += '[CPU:%3s%%]' % int(np.average(self.cpu_usage))
description += 'Epoch %s/%s' % (execution.epochs_seen + 1,
execution.max_epochs)
self.epoch_pbar.set_description(description)
def _avg_metrics(self, metrics):
agg_metrics = {}
for metric_name, values in metrics.items():
if metric_name == 'batch' or metric_name == 'size':
continue
agg_metrics[metric_name] = '%.4f' % np.average(values)
return agg_metrics
def generate_trial_id():
    s = str(time.time()) + str(random.randint(1, 10 ** 7))
return hashlib.sha256(s.encode('utf-8')).hexdigest()[:32]
def format_execution_id(i, executions_per_trial):
execution_id_length = math.ceil(
math.log(executions_per_trial, 10))
execution_id_template = '%0' + str(execution_id_length) + 'd'
execution_id = execution_id_template % i
return execution_id
@contextlib.contextmanager
def maybe_distribute(distribution_strategy):
if distribution_strategy is None:
yield
else:
with distribution_strategy.scope():
yield
|
[
"numpy.average",
"math.log",
"collections.defaultdict",
"tensorflow.nest.flatten",
"time.time",
"random.randint"
] |
[((3247, 3264), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3258, 3264), False, 'from collections import defaultdict\n'), ((5053, 5070), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5064, 5070), False, 'from collections import defaultdict\n'), ((7212, 7246), 'math.log', 'math.log', (['executions_per_trial', '(10)'], {}), '(executions_per_trial, 10)\n', (7220, 7246), False, 'import math\n'), ((2008, 2033), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['fit_args'], {}), '(fit_args)\n', (2023, 2033), True, 'import tensorflow as tf\n'), ((7010, 7021), 'time.time', 'time.time', ([], {}), '()\n', (7019, 7021), False, 'import time\n'), ((7029, 7058), 'random.randint', 'random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (7043, 7058), False, 'import random\n'), ((6212, 6233), 'numpy.average', 'np.average', (['gpu_usage'], {}), '(gpu_usage)\n', (6222, 6233), True, 'import numpy as np\n'), ((6463, 6489), 'numpy.average', 'np.average', (['self.cpu_usage'], {}), '(self.cpu_usage)\n', (6473, 6489), True, 'import numpy as np\n'), ((6925, 6943), 'numpy.average', 'np.average', (['values'], {}), '(values)\n', (6935, 6943), True, 'import numpy as np\n'), ((6327, 6353), 'numpy.average', 'np.average', (['self.gpu_usage'], {}), '(self.gpu_usage)\n', (6337, 6353), True, 'import numpy as np\n')]
|
import warnings
from typing import Callable, Optional, TypeVar, cast
CallableType = TypeVar("CallableType", bound=Callable)
def deprecation_wrapper(message: str, function_or_class: CallableType) -> CallableType:
"""Creates a wrapper for a deprecated function or class. Prints a warning
the first time a function or class is called.
Args:
message (str): Warning message.
function_or_class (CallableType): Function or class to wrap.
Returns:
CallableType: Wrapped function/class.
"""
warned = False
def curried(*args, **kwargs): # pragma: no cover
nonlocal warned
if not warned:
warnings.warn(message, DeprecationWarning, stacklevel=2)
warned = True
return function_or_class(*args, **kwargs)
return cast(CallableType, curried)
def new_name_wrapper(
old_name: str, new_name: str, function_or_class: CallableType
) -> CallableType:
"""Creates a wrapper for a renamed function or class. Prints a warning the first
time a function or class is called with the old name.
Args:
old_name (str): Old name of function or class. Printed in warning.
new_name (str): New name of function or class. Printed in warning.
function_or_class (CallableType): Function or class to wrap.
Returns:
CallableType: Wrapped function/class.
"""
return deprecation_wrapper(
f"{old_name} is deprecated! Use {new_name} instead.", function_or_class
)
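
# Hypothetical usage sketch (the names below are invented for illustration): wrapping a
# renamed function so that callers of the old name get a one-time DeprecationWarning
# pointing them at the new name, while behaviour stays identical.
#
#   def configure(path: str) -> None: ...
#   old_configure = new_name_wrapper("old_configure", "configure", configure)
#   old_configure("app.cfg")  # warns once, then simply calls configure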
|
[
"warnings.warn",
"typing.cast",
"typing.TypeVar"
] |
[((85, 124), 'typing.TypeVar', 'TypeVar', (['"""CallableType"""'], {'bound': 'Callable'}), "('CallableType', bound=Callable)\n", (92, 124), False, 'from typing import Callable, Optional, TypeVar, cast\n'), ((810, 837), 'typing.cast', 'cast', (['CallableType', 'curried'], {}), '(CallableType, curried)\n', (814, 837), False, 'from typing import Callable, Optional, TypeVar, cast\n'), ((665, 721), 'warnings.warn', 'warnings.warn', (['message', 'DeprecationWarning'], {'stacklevel': '(2)'}), '(message, DeprecationWarning, stacklevel=2)\n', (678, 721), False, 'import warnings\n')]
|
import json
data = {
"users": [
{"Name": "Dominator", "skill": 100, "gold": 99999, "weapons": ['Sword', 'Atomic Laser']},
{"Name": "Looser", "skill": 1, "gold": -100000, "weapons": [None, None, None]},
]
}
with open("example.json", "w") as f:
s = json.dumps(data, indent=4)
f.write(s)
|
[
"json.dumps"
] |
[((277, 303), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)'}), '(data, indent=4)\n', (287, 303), False, 'import json\n')]
|
import random as r
# Sets up required variables
running = True
user_wins = 0
comp_wins = 0
answers = ["R", "P", "S"]
win_combos = ["PR", "RS", "SP"]
# Welcome message
print("Welcome to Rock-Paper-Scissors. Please input one of the following:"
"\n'R' - rock\n'P' - paper\n'S' - scissors\nto get started.")
while running:
# Running game of rock, paper, scissors
if user_wins == 3 or comp_wins == 3:
print(f"Game is over. The score was {user_wins}-{comp_wins}. Thanks for playing.")
break
user_guess = input("Guess:").upper()
if user_guess.upper() not in answers:
print("You didn't enter a valid letter.")
break
comp_guess = answers[r.randint(0, 2)]
guess_join = user_guess + comp_guess
if guess_join[0] == guess_join[1]:
print(f"You both guessed {user_guess}!\nThe current score is {user_wins}-{comp_wins}.")
else:
# Checks to see if computer or user has won the round.
        if guess_join in win_combos:
user_wins += 1
print(f"You win! Score is {user_wins}-{comp_wins}.")
else:
comp_wins += 1
print(f"You lose! Score is {user_wins}-{comp_wins}.")
|
[
"random.randint"
] |
[((693, 708), 'random.randint', 'r.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (702, 708), True, 'import random as r\n')]
|
from baselines import deepq
def add_opts(parser):
pass
class BaselinesDQNAgent(object):
'''
classdocs
'''
def __init__(self, opts):
self.metadata = {
'discrete_actions': True,
}
self.opts = opts
self.agent = None
def configure(self, observation_space_shape, nb_actions):
pass
def train(self, env, nb_steps, visualize, verbosity):
model = deepq.models.mlp([64])
self.agent = deepq.learn(
env,
q_func=model,
lr=1e-3,
max_timesteps=nb_steps,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
print_freq=10 if verbosity else None,
callback=env.render if visualize else None
)
def test(self, env, nb_episodes, visualize):
episodes = 0
while episodes < nb_episodes:
obs, done = env.reset(), False
episode_rew = 0
while not done:
if visualize:
env.render()
obs, rew, done, _ = env.step(self.agent(obs[None])[0])
episode_rew += rew
print("Episode reward", episode_rew)
episodes += 1
def load_weights(self, load_file):
self.agent = deepq.load(load_file)
def save_weights(self, save_file, overwrite):
self.agent.save(save_file)
|
[
"baselines.deepq.learn",
"baselines.deepq.models.mlp",
"baselines.deepq.load"
] |
[((368, 390), 'baselines.deepq.models.mlp', 'deepq.models.mlp', (['[64]'], {}), '([64])\n', (384, 390), False, 'from baselines import deepq\n'), ((406, 637), 'baselines.deepq.learn', 'deepq.learn', (['env'], {'q_func': 'model', 'lr': '(0.001)', 'max_timesteps': 'nb_steps', 'buffer_size': '(50000)', 'exploration_fraction': '(0.1)', 'exploration_final_eps': '(0.02)', 'print_freq': '(10 if verbosity else None)', 'callback': '(env.render if visualize else None)'}), '(env, q_func=model, lr=0.001, max_timesteps=nb_steps,\n buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02,\n print_freq=10 if verbosity else None, callback=env.render if visualize else\n None)\n', (417, 637), False, 'from baselines import deepq\n'), ((1049, 1070), 'baselines.deepq.load', 'deepq.load', (['load_file'], {}), '(load_file)\n', (1059, 1070), False, 'from baselines import deepq\n')]
|
#!/usr/bin/python3
import requests
import click
from rich import inspect
from rich.console import Console
from url_normalize import url_normalize
from urllib.parse import quote
console = Console()
def shell_encode(string):
return string.replace(" ", "${IFS}")
@click.command()
@click.option("-u", "--url", prompt="Target URL", help="The URL of the Cacti installation")
@click.option("-p", "--payload", prompt="Payload", help="The payload that you want to execute on the target")
def exploit(url, payload):
"""Cacti v1.2.8 Unauthenticated Remote Code Execution"""
# Normalize URL input, URL encode the payload
url = url + "/graph_realtime.php?action=init"
url = url_normalize(url, default_scheme="http")
payload = shell_encode(payload)
payload = quote(payload)
cookies = {"Cacti": payload}
# Check if target is vulnerable
try:
with console.status("Checking to see if target is vulnerable"):
request = requests.get(url)
except:
console.print(f'Could not connect to the host, please check the URL again: {url}', style="red")
exit(1)
inspect(request)
if request.status_code == 200:
with console.status("Realtime graph found, sending payload."):
requests.get(url, cookies=cookies)
else:
click.echo("Realtime graph not found. The target may not be vulnerable.")
if __name__ == "__main__":
exploit()
|
[
"rich.inspect",
"click.option",
"url_normalize.url_normalize",
"urllib.parse.quote",
"requests.get",
"rich.console.Console",
"click.echo",
"click.command"
] |
[((189, 198), 'rich.console.Console', 'Console', ([], {}), '()\n', (196, 198), False, 'from rich.console import Console\n'), ((269, 284), 'click.command', 'click.command', ([], {}), '()\n', (282, 284), False, 'import click\n'), ((286, 381), 'click.option', 'click.option', (['"""-u"""', '"""--url"""'], {'prompt': '"""Target URL"""', 'help': '"""The URL of the Cacti installation"""'}), "('-u', '--url', prompt='Target URL', help=\n 'The URL of the Cacti installation')\n", (298, 381), False, 'import click\n'), ((378, 491), 'click.option', 'click.option', (['"""-p"""', '"""--payload"""'], {'prompt': '"""Payload"""', 'help': '"""The payload that you want to execute on the target"""'}), "('-p', '--payload', prompt='Payload', help=\n 'The payload that you want to execute on the target')\n", (390, 491), False, 'import click\n'), ((686, 727), 'url_normalize.url_normalize', 'url_normalize', (['url'], {'default_scheme': '"""http"""'}), "(url, default_scheme='http')\n", (699, 727), False, 'from url_normalize import url_normalize\n'), ((778, 792), 'urllib.parse.quote', 'quote', (['payload'], {}), '(payload)\n', (783, 792), False, 'from urllib.parse import quote\n'), ((1121, 1137), 'rich.inspect', 'inspect', (['request'], {}), '(request)\n', (1128, 1137), False, 'from rich import inspect\n'), ((1309, 1382), 'click.echo', 'click.echo', (['"""Realtime graph not found. The target may not be vulnerable."""'], {}), "('Realtime graph not found. The target may not be vulnerable.')\n", (1319, 1382), False, 'import click\n'), ((966, 983), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (978, 983), False, 'import requests\n'), ((1256, 1290), 'requests.get', 'requests.get', (['url'], {'cookies': 'cookies'}), '(url, cookies=cookies)\n', (1268, 1290), False, 'import requests\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: terra/treasury/v1beta1/genesis.proto, terra/treasury/v1beta1/query.proto, terra/treasury/v1beta1/treasury.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import Dict, List
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
@dataclass(eq=False, repr=False)
class Params(betterproto.Message):
"""Params defines the parameters for the oracle module."""
tax_policy: "PolicyConstraints" = betterproto.message_field(1)
reward_policy: "PolicyConstraints" = betterproto.message_field(2)
seigniorage_burden_target: str = betterproto.string_field(3)
mining_increment: str = betterproto.string_field(4)
window_short: int = betterproto.uint64_field(5)
window_long: int = betterproto.uint64_field(6)
window_probation: int = betterproto.uint64_field(7)
@dataclass(eq=False, repr=False)
class PolicyConstraints(betterproto.Message):
"""
PolicyConstraints - defines policy constraints can be applied in tax &
reward policies
"""
rate_min: str = betterproto.string_field(1)
rate_max: str = betterproto.string_field(2)
cap: "___cosmos_base_v1_beta1__.Coin" = betterproto.message_field(3)
change_rate_max: str = betterproto.string_field(4)
@dataclass(eq=False, repr=False)
class EpochTaxProceeds(betterproto.Message):
"""
EpochTaxProceeds represents the tax amount collected at the current epoch
"""
tax_proceeds: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class EpochInitialIssuance(betterproto.Message):
"""
EpochInitialIssuance represents initial issuance of the currrent epoch
"""
issuance: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class QueryTaxRateRequest(betterproto.Message):
"""
QueryTaxRateRequest is the request type for the Query/TaxRate RPC method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryTaxRateResponse(betterproto.Message):
"""
QueryTaxRateResponse is response type for the Query/TaxRate RPC method.
"""
tax_rate: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryTaxCapRequest(betterproto.Message):
"""
QueryTaxCapRequest is the request type for the Query/TaxCap RPC method.
"""
# denom defines the denomination to query for.
denom: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryTaxCapResponse(betterproto.Message):
"""
QueryTaxCapResponse is response type for the Query/TaxCap RPC method.
"""
tax_cap: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryTaxCapsRequest(betterproto.Message):
"""
QueryTaxCapsRequest is the request type for the Query/TaxCaps RPC method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryTaxCapsResponseItem(betterproto.Message):
"""
QueryTaxCapsResponseItem is response item type for the Query/TaxCaps RPC
method.
"""
denom: str = betterproto.string_field(1)
tax_cap: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class QueryTaxCapsResponse(betterproto.Message):
"""
QueryTaxCapsResponse is response type for the Query/TaxCaps RPC method.
"""
tax_caps: List["QueryTaxCapsResponseItem"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class QueryRewardWeightRequest(betterproto.Message):
"""
QueryRewardWeightRequest is the request type for the Query/RewardWeight RPC
method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryRewardWeightResponse(betterproto.Message):
"""
QueryRewardWeightResponse is response type for the Query/RewardWeight RPC
method.
"""
reward_weight: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryTaxProceedsRequest(betterproto.Message):
"""
QueryTaxProceedsRequest is the request type for the Query/TaxProceeds RPC
method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryTaxProceedsResponse(betterproto.Message):
"""
QueryTaxProceedsResponse is response type for the Query/TaxProceeds RPC
method.
"""
tax_proceeds: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class QuerySeigniorageProceedsRequest(betterproto.Message):
"""
QuerySeigniorageProceedsRequest is the request type for the
Query/SeigniorageProceeds RPC method.
"""
pass
@dataclass(eq=False, repr=False)
class QuerySeigniorageProceedsResponse(betterproto.Message):
"""
QuerySeigniorageProceedsResponse is response type for the
Query/SeigniorageProceeds RPC method.
"""
seigniorage_proceeds: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryIndicatorsRequest(betterproto.Message):
"""
QueryIndicatorsRequest is the request type for the Query/Indicators RPC
method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryIndicatorsResponse(betterproto.Message):
"""
QueryIndicatorsResponse is response type for the Query/Indicators RPC
method.
"""
trl_year: str = betterproto.string_field(1)
trl_month: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class QueryParamsRequest(betterproto.Message):
"""
QueryParamsRequest is the request type for the Query/Params RPC method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryParamsResponse(betterproto.Message):
"""
QueryParamsResponse is the response type for the Query/Params RPC method.
"""
# params defines the parameters of the module.
params: "Params" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class GenesisState(betterproto.Message):
"""GenesisState defines the oracle module's genesis state."""
params: "Params" = betterproto.message_field(1)
tax_rate: str = betterproto.string_field(2)
reward_weight: str = betterproto.string_field(3)
tax_caps: List["TaxCap"] = betterproto.message_field(4)
tax_proceeds: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(5)
epoch_initial_issuance: List[
"___cosmos_base_v1_beta1__.Coin"
] = betterproto.message_field(6)
epoch_states: List["EpochState"] = betterproto.message_field(7)
@dataclass(eq=False, repr=False)
class TaxCap(betterproto.Message):
"""TaxCap is the max tax amount can be charged for the given denom"""
denom: str = betterproto.string_field(1)
tax_cap: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class EpochState(betterproto.Message):
"""EpochState is the record for each epoch state"""
epoch: int = betterproto.uint64_field(1)
tax_reward: str = betterproto.string_field(2)
seigniorage_reward: str = betterproto.string_field(3)
total_staked_luna: str = betterproto.string_field(4)
class QueryStub(betterproto.ServiceStub):
async def tax_rate(self) -> "QueryTaxRateResponse":
request = QueryTaxRateRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/TaxRate", request, QueryTaxRateResponse
)
async def tax_cap(self, *, denom: str = "") -> "QueryTaxCapResponse":
request = QueryTaxCapRequest()
request.denom = denom
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/TaxCap", request, QueryTaxCapResponse
)
async def tax_caps(self) -> "QueryTaxCapsResponse":
request = QueryTaxCapsRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/TaxCaps", request, QueryTaxCapsResponse
)
async def reward_weight(self) -> "QueryRewardWeightResponse":
request = QueryRewardWeightRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/RewardWeight",
request,
QueryRewardWeightResponse,
)
async def seigniorage_proceeds(self) -> "QuerySeigniorageProceedsResponse":
request = QuerySeigniorageProceedsRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/SeigniorageProceeds",
request,
QuerySeigniorageProceedsResponse,
)
async def tax_proceeds(self) -> "QueryTaxProceedsResponse":
request = QueryTaxProceedsRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/TaxProceeds",
request,
QueryTaxProceedsResponse,
)
async def indicators(self) -> "QueryIndicatorsResponse":
request = QueryIndicatorsRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/Indicators", request, QueryIndicatorsResponse
)
async def params(self) -> "QueryParamsResponse":
request = QueryParamsRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/Params", request, QueryParamsResponse
)
class QueryBase(ServiceBase):
async def tax_rate(self) -> "QueryTaxRateResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def tax_cap(self, denom: str) -> "QueryTaxCapResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def tax_caps(self) -> "QueryTaxCapsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def reward_weight(self) -> "QueryRewardWeightResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def seigniorage_proceeds(self) -> "QuerySeigniorageProceedsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def tax_proceeds(self) -> "QueryTaxProceedsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def indicators(self) -> "QueryIndicatorsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def params(self) -> "QueryParamsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def __rpc_tax_rate(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.tax_rate(**request_kwargs)
await stream.send_message(response)
async def __rpc_tax_cap(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"denom": request.denom,
}
response = await self.tax_cap(**request_kwargs)
await stream.send_message(response)
async def __rpc_tax_caps(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.tax_caps(**request_kwargs)
await stream.send_message(response)
async def __rpc_reward_weight(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.reward_weight(**request_kwargs)
await stream.send_message(response)
async def __rpc_seigniorage_proceeds(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.seigniorage_proceeds(**request_kwargs)
await stream.send_message(response)
async def __rpc_tax_proceeds(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.tax_proceeds(**request_kwargs)
await stream.send_message(response)
async def __rpc_indicators(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.indicators(**request_kwargs)
await stream.send_message(response)
async def __rpc_params(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.params(**request_kwargs)
await stream.send_message(response)
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
"/terra.treasury.v1beta1.Query/TaxRate": grpclib.const.Handler(
self.__rpc_tax_rate,
grpclib.const.Cardinality.UNARY_UNARY,
QueryTaxRateRequest,
QueryTaxRateResponse,
),
"/terra.treasury.v1beta1.Query/TaxCap": grpclib.const.Handler(
self.__rpc_tax_cap,
grpclib.const.Cardinality.UNARY_UNARY,
QueryTaxCapRequest,
QueryTaxCapResponse,
),
"/terra.treasury.v1beta1.Query/TaxCaps": grpclib.const.Handler(
self.__rpc_tax_caps,
grpclib.const.Cardinality.UNARY_UNARY,
QueryTaxCapsRequest,
QueryTaxCapsResponse,
),
"/terra.treasury.v1beta1.Query/RewardWeight": grpclib.const.Handler(
self.__rpc_reward_weight,
grpclib.const.Cardinality.UNARY_UNARY,
QueryRewardWeightRequest,
QueryRewardWeightResponse,
),
"/terra.treasury.v1beta1.Query/SeigniorageProceeds": grpclib.const.Handler(
self.__rpc_seigniorage_proceeds,
grpclib.const.Cardinality.UNARY_UNARY,
QuerySeigniorageProceedsRequest,
QuerySeigniorageProceedsResponse,
),
"/terra.treasury.v1beta1.Query/TaxProceeds": grpclib.const.Handler(
self.__rpc_tax_proceeds,
grpclib.const.Cardinality.UNARY_UNARY,
QueryTaxProceedsRequest,
QueryTaxProceedsResponse,
),
"/terra.treasury.v1beta1.Query/Indicators": grpclib.const.Handler(
self.__rpc_indicators,
grpclib.const.Cardinality.UNARY_UNARY,
QueryIndicatorsRequest,
QueryIndicatorsResponse,
),
"/terra.treasury.v1beta1.Query/Params": grpclib.const.Handler(
self.__rpc_params,
grpclib.const.Cardinality.UNARY_UNARY,
QueryParamsRequest,
QueryParamsResponse,
),
}
from ....cosmos.base import v1beta1 as ___cosmos_base_v1_beta1__
|
[
"dataclasses.dataclass",
"grpclib.const.Handler",
"grpclib.GRPCError",
"betterproto.string_field",
"betterproto.uint64_field",
"betterproto.message_field"
] |
[((369, 400), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (378, 400), False, 'from dataclasses import dataclass\n'), ((920, 951), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (929, 951), False, 'from dataclasses import dataclass\n'), ((1337, 1368), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (1346, 1368), False, 'from dataclasses import dataclass\n'), ((1600, 1631), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (1609, 1631), False, 'from dataclasses import dataclass\n'), ((1860, 1891), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (1869, 1891), False, 'from dataclasses import dataclass\n'), ((2047, 2078), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (2056, 2078), False, 'from dataclasses import dataclass\n'), ((2272, 2303), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (2281, 2303), False, 'from dataclasses import dataclass\n'), ((2543, 2574), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (2552, 2574), False, 'from dataclasses import dataclass\n'), ((2764, 2795), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (2773, 2795), False, 'from dataclasses import dataclass\n'), ((2951, 2982), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (2960, 2982), False, 'from dataclasses import dataclass\n'), ((3237, 3268), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (3246, 3268), False, 'from dataclasses import dataclass\n'), ((3492, 3523), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (3501, 3523), False, 'from dataclasses import dataclass\n'), ((3698, 3729), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (3707, 3729), False, 'from dataclasses import dataclass\n'), ((3947, 3978), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (3956, 3978), False, 'from dataclasses import dataclass\n'), ((4150, 4181), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (4159, 4181), False, 'from dataclasses import dataclass\n'), ((4431, 4462), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (4440, 4462), False, 'from dataclasses import dataclass\n'), ((4658, 4689), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (4667, 4689), False, 'from dataclasses import dataclass\n'), ((4935, 4966), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (4944, 4966), False, 'from dataclasses import dataclass\n'), ((5135, 5166), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (5144, 5166), False, 'from dataclasses import dataclass\n'), ((5422, 5453), 
'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (5431, 5453), False, 'from dataclasses import dataclass\n'), ((5606, 5637), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (5615, 5637), False, 'from dataclasses import dataclass\n'), ((5887, 5918), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (5896, 5918), False, 'from dataclasses import dataclass\n'), ((6511, 6542), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (6520, 6542), False, 'from dataclasses import dataclass\n'), ((6748, 6779), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (6757, 6779), False, 'from dataclasses import dataclass\n'), ((538, 566), 'betterproto.message_field', 'betterproto.message_field', (['(1)'], {}), '(1)\n', (563, 566), False, 'import betterproto\n'), ((608, 636), 'betterproto.message_field', 'betterproto.message_field', (['(2)'], {}), '(2)\n', (633, 636), False, 'import betterproto\n'), ((674, 701), 'betterproto.string_field', 'betterproto.string_field', (['(3)'], {}), '(3)\n', (698, 701), False, 'import betterproto\n'), ((730, 757), 'betterproto.string_field', 'betterproto.string_field', (['(4)'], {}), '(4)\n', (754, 757), False, 'import betterproto\n'), ((782, 809), 'betterproto.uint64_field', 'betterproto.uint64_field', (['(5)'], {}), '(5)\n', (806, 809), False, 'import betterproto\n'), ((833, 860), 'betterproto.uint64_field', 'betterproto.uint64_field', (['(6)'], {}), '(6)\n', (857, 860), False, 'import betterproto\n'), ((889, 916), 'betterproto.uint64_field', 'betterproto.uint64_field', (['(7)'], {}), '(7)\n', (913, 916), False, 'import betterproto\n'), ((1130, 1157), 'betterproto.string_field', 'betterproto.string_field', (['(1)'], {}), '(1)\n', (1154, 1157), False, 'import betterproto\n'), ((1178, 1205), 'betterproto.string_field', 'betterproto.string_field', (['(2)'], {}), '(2)\n', (1202, 1205), False, 'import betterproto\n'), ((1250, 1278), 'betterproto.message_field', 'betterproto.message_field', (['(3)'], {}), '(3)\n', (1275, 1278), False, 'import betterproto\n'), ((1306, 1333), 'betterproto.string_field', 'betterproto.string_field', (['(4)'], {}), '(4)\n', (1330, 1333), False, 'import betterproto\n'), ((1568, 1596), 'betterproto.message_field', 'betterproto.message_field', (['(1)'], {}), '(1)\n', (1593, 1596), False, 'import betterproto\n'), ((1828, 1856), 'betterproto.message_field', 'betterproto.message_field', (['(1)'], {}), '(1)\n', (1853, 1856), False, 'import betterproto\n'), ((2241, 2268), 'betterproto.string_field', 'betterproto.string_field', (['(1)'], {}), '(1)\n', (2265, 2268), False, 'import betterproto\n'), ((2512, 2539), 'betterproto.string_field', 'betterproto.string_field', (['(1)'], {}), '(1)\n', (2536, 2539), False, 'import betterproto\n'), ((2733, 2760), 'betterproto.string_field', 'betterproto.string_field', (['(1)'], {}), '(1)\n', (2757, 2760), False, 'import betterproto\n'), ((3159, 3186), 'betterproto.string_field', 'betterproto.string_field', (['(1)'], {}), '(1)\n', (3183, 3186), False, 'import betterproto\n'), ((3206, 3233), 'betterproto.string_field', 'betterproto.string_field', (['(2)'], {}), '(2)\n', (3230, 3233), False, 'import betterproto\n'), ((3460, 3488), 'betterproto.message_field', 'betterproto.message_field', (['(1)'], {}), '(1)\n', (3485, 3488), False, 
'import betterproto\n'), ((3916, 3943), 'betterproto.string_field', 'betterproto.string_field', (['(1)'], {}), '(1)\n', (3940, 3943), False, 'import betterproto\n'), ((4399, 4427), 'betterproto.message_field', 'betterproto.message_field', (['(1)'], {}), '(1)\n', (4424, 4427), False, 'import betterproto\n'), ((4904, 4931), 'betterproto.string_field', 'betterproto.string_field', (['(1)'], {}), '(1)\n', (4928, 4931), False, 'import betterproto\n'), ((5342, 5369), 'betterproto.string_field', 'betterproto.string_field', (['(1)'], {}), '(1)\n', (5366, 5369), False, 'import betterproto\n'), ((5391, 5418), 'betterproto.string_field', 'betterproto.string_field', (['(2)'], {}), '(2)\n', (5415, 5418), False, 'import betterproto\n'), ((5855, 5883), 'betterproto.message_field', 'betterproto.message_field', (['(1)'], {}), '(1)\n', (5880, 5883), False, 'import betterproto\n'), ((6050, 6078), 'betterproto.message_field', 'betterproto.message_field', (['(1)'], {}), '(1)\n', (6075, 6078), False, 'import betterproto\n'), ((6099, 6126), 'betterproto.string_field', 'betterproto.string_field', (['(2)'], {}), '(2)\n', (6123, 6126), False, 'import betterproto\n'), ((6152, 6179), 'betterproto.string_field', 'betterproto.string_field', (['(3)'], {}), '(3)\n', (6176, 6179), False, 'import betterproto\n'), ((6211, 6239), 'betterproto.message_field', 'betterproto.message_field', (['(4)'], {}), '(4)\n', (6236, 6239), False, 'import betterproto\n'), ((6299, 6327), 'betterproto.message_field', 'betterproto.message_field', (['(5)'], {}), '(5)\n', (6324, 6327), False, 'import betterproto\n'), ((6411, 6439), 'betterproto.message_field', 'betterproto.message_field', (['(6)'], {}), '(6)\n', (6436, 6439), False, 'import betterproto\n'), ((6479, 6507), 'betterproto.message_field', 'betterproto.message_field', (['(7)'], {}), '(7)\n', (6504, 6507), False, 'import betterproto\n'), ((6670, 6697), 'betterproto.string_field', 'betterproto.string_field', (['(1)'], {}), '(1)\n', (6694, 6697), False, 'import betterproto\n'), ((6717, 6744), 'betterproto.string_field', 'betterproto.string_field', (['(2)'], {}), '(2)\n', (6741, 6744), False, 'import betterproto\n'), ((6893, 6920), 'betterproto.uint64_field', 'betterproto.uint64_field', (['(1)'], {}), '(1)\n', (6917, 6920), False, 'import betterproto\n'), ((6943, 6970), 'betterproto.string_field', 'betterproto.string_field', (['(2)'], {}), '(2)\n', (6967, 6970), False, 'import betterproto\n'), ((7001, 7028), 'betterproto.string_field', 'betterproto.string_field', (['(3)'], {}), '(3)\n', (7025, 7028), False, 'import betterproto\n'), ((7058, 7085), 'betterproto.string_field', 'betterproto.string_field', (['(4)'], {}), '(4)\n', (7082, 7085), False, 'import betterproto\n'), ((9320, 9373), 'grpclib.GRPCError', 'grpclib.GRPCError', (['grpclib.const.Status.UNIMPLEMENTED'], {}), '(grpclib.const.Status.UNIMPLEMENTED)\n', (9337, 9373), False, 'import grpclib\n'), ((9455, 9508), 'grpclib.GRPCError', 'grpclib.GRPCError', (['grpclib.const.Status.UNIMPLEMENTED'], {}), '(grpclib.const.Status.UNIMPLEMENTED)\n', (9472, 9508), False, 'import grpclib\n'), ((9580, 9633), 'grpclib.GRPCError', 'grpclib.GRPCError', (['grpclib.const.Status.UNIMPLEMENTED'], {}), '(grpclib.const.Status.UNIMPLEMENTED)\n', (9597, 9633), False, 'import grpclib\n'), ((9715, 9768), 'grpclib.GRPCError', 'grpclib.GRPCError', (['grpclib.const.Status.UNIMPLEMENTED'], {}), '(grpclib.const.Status.UNIMPLEMENTED)\n', (9732, 9768), False, 'import grpclib\n'), ((9864, 9917), 'grpclib.GRPCError', 'grpclib.GRPCError', 
(['grpclib.const.Status.UNIMPLEMENTED'], {}), '(grpclib.const.Status.UNIMPLEMENTED)\n', (9881, 9917), False, 'import grpclib\n'), ((9997, 10050), 'grpclib.GRPCError', 'grpclib.GRPCError', (['grpclib.const.Status.UNIMPLEMENTED'], {}), '(grpclib.const.Status.UNIMPLEMENTED)\n', (10014, 10050), False, 'import grpclib\n'), ((10127, 10180), 'grpclib.GRPCError', 'grpclib.GRPCError', (['grpclib.const.Status.UNIMPLEMENTED'], {}), '(grpclib.const.Status.UNIMPLEMENTED)\n', (10144, 10180), False, 'import grpclib\n'), ((10249, 10302), 'grpclib.GRPCError', 'grpclib.GRPCError', (['grpclib.const.Status.UNIMPLEMENTED'], {}), '(grpclib.const.Status.UNIMPLEMENTED)\n', (10266, 10302), False, 'import grpclib\n'), ((12546, 12675), 'grpclib.const.Handler', 'grpclib.const.Handler', (['self.__rpc_tax_rate', 'grpclib.const.Cardinality.UNARY_UNARY', 'QueryTaxRateRequest', 'QueryTaxRateResponse'], {}), '(self.__rpc_tax_rate, grpclib.const.Cardinality.\n UNARY_UNARY, QueryTaxRateRequest, QueryTaxRateResponse)\n', (12567, 12675), False, 'import grpclib\n'), ((12803, 12929), 'grpclib.const.Handler', 'grpclib.const.Handler', (['self.__rpc_tax_cap', 'grpclib.const.Cardinality.UNARY_UNARY', 'QueryTaxCapRequest', 'QueryTaxCapResponse'], {}), '(self.__rpc_tax_cap, grpclib.const.Cardinality.\n UNARY_UNARY, QueryTaxCapRequest, QueryTaxCapResponse)\n', (12824, 12929), False, 'import grpclib\n'), ((13058, 13187), 'grpclib.const.Handler', 'grpclib.const.Handler', (['self.__rpc_tax_caps', 'grpclib.const.Cardinality.UNARY_UNARY', 'QueryTaxCapsRequest', 'QueryTaxCapsResponse'], {}), '(self.__rpc_tax_caps, grpclib.const.Cardinality.\n UNARY_UNARY, QueryTaxCapsRequest, QueryTaxCapsResponse)\n', (13079, 13187), False, 'import grpclib\n'), ((13321, 13465), 'grpclib.const.Handler', 'grpclib.const.Handler', (['self.__rpc_reward_weight', 'grpclib.const.Cardinality.UNARY_UNARY', 'QueryRewardWeightRequest', 'QueryRewardWeightResponse'], {}), '(self.__rpc_reward_weight, grpclib.const.Cardinality.\n UNARY_UNARY, QueryRewardWeightRequest, QueryRewardWeightResponse)\n', (13342, 13465), False, 'import grpclib\n'), ((13606, 13775), 'grpclib.const.Handler', 'grpclib.const.Handler', (['self.__rpc_seigniorage_proceeds', 'grpclib.const.Cardinality.UNARY_UNARY', 'QuerySeigniorageProceedsRequest', 'QuerySeigniorageProceedsResponse'], {}), '(self.__rpc_seigniorage_proceeds, grpclib.const.\n Cardinality.UNARY_UNARY, QuerySeigniorageProceedsRequest,\n QuerySeigniorageProceedsResponse)\n', (13627, 13775), False, 'import grpclib\n'), ((13904, 14045), 'grpclib.const.Handler', 'grpclib.const.Handler', (['self.__rpc_tax_proceeds', 'grpclib.const.Cardinality.UNARY_UNARY', 'QueryTaxProceedsRequest', 'QueryTaxProceedsResponse'], {}), '(self.__rpc_tax_proceeds, grpclib.const.Cardinality.\n UNARY_UNARY, QueryTaxProceedsRequest, QueryTaxProceedsResponse)\n', (13925, 14045), False, 'import grpclib\n'), ((14177, 14314), 'grpclib.const.Handler', 'grpclib.const.Handler', (['self.__rpc_indicators', 'grpclib.const.Cardinality.UNARY_UNARY', 'QueryIndicatorsRequest', 'QueryIndicatorsResponse'], {}), '(self.__rpc_indicators, grpclib.const.Cardinality.\n UNARY_UNARY, QueryIndicatorsRequest, QueryIndicatorsResponse)\n', (14198, 14314), False, 'import grpclib\n'), ((14442, 14567), 'grpclib.const.Handler', 'grpclib.const.Handler', (['self.__rpc_params', 'grpclib.const.Cardinality.UNARY_UNARY', 'QueryParamsRequest', 'QueryParamsResponse'], {}), '(self.__rpc_params, grpclib.const.Cardinality.\n UNARY_UNARY, QueryParamsRequest, QueryParamsResponse)\n', (14463, 14567), False, 'import 
grpclib\n')]
|
import jax
import elegy
import unittest
import numpy as np
import jax.numpy as jnp
import optax
class MLP(elegy.Module):
"""Standard LeNet-300-100 MLP network."""
n1: int
n2: int
def __init__(self, n1: int = 3, n2: int = 4):
super().__init__()
self.n1 = n1
self.n2 = n2
def call(self, image: jnp.ndarray, training: bool):
x = image.astype(jnp.float32) / 255.0
x = jnp.reshape(x, [x.shape[0], -1])
x = elegy.nn.Linear(self.n1)(x)
x = elegy.nn.BatchNormalization()(x)
x = jax.nn.relu(x)
x = elegy.nn.Linear(self.n2)(x)
x = jax.nn.relu(x)
x = elegy.nn.Linear(10)(x)
return x
class OptimizerTest(unittest.TestCase):
def test_optimizer(self):
optax_op = optax.adam(1e-3)
lr_schedule = lambda step, epoch: step / 3
optimizer = elegy.Optimizer(optax_op, lr_schedule=lr_schedule)
params = np.random.uniform((3, 4))
grads = np.random.uniform((3, 4))
rng = elegy.RNGSeq(42)
optimizer_states = optimizer.init(rng, params)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 0 / 3)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 1 / 3)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 2 / 3)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 3 / 3)
def test_optimizer_epoch(self):
optax_op = optax.adam(1e-3)
lr_schedule = lambda step, epoch: epoch
optimizer = elegy.Optimizer(
optax_op, lr_schedule=lr_schedule, steps_per_epoch=2
)
params = np.random.uniform((3, 4))
grads = np.random.uniform((3, 4))
rng = elegy.RNGSeq(42)
optimizer_states = optimizer.init(
rng=rng,
net_params=params,
)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 0)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 0)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 1)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 1)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
def test_optimizer_chain(self):
optimizer = elegy.Optimizer(
optax.sgd(0.1),
optax.clip(0.5),
)
params = np.zeros(shape=(3, 4))
grads = np.ones(shape=(3, 4)) * 100_000
rng = elegy.RNGSeq(42)
optimizer_states = optimizer.init(
rng=rng,
net_params=params,
)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert np.all(-0.5 <= params) and np.all(params <= 0.5)
def test_lr_logging(self):
model = elegy.Model(
module=MLP(n1=3, n2=1),
loss=elegy.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=elegy.metrics.SparseCategoricalAccuracy(),
optimizer=elegy.Optimizer(
optax.adamw(1.0, b1=0.95),
lr_schedule=lambda step, epoch: jnp.array(1e-3),
),
run_eagerly=True,
)
X = np.random.uniform(size=(5, 7, 7))
y = np.random.randint(10, size=(5,))
history = model.fit(
x=X,
y=y,
epochs=1,
steps_per_epoch=1,
batch_size=5,
validation_data=(X, y),
shuffle=True,
verbose=0,
)
assert "lr" in history.history
assert np.allclose(history.history["lr"], 1e-3)
|
[
"elegy.nn.BatchNormalization",
"optax.adamw",
"jax.nn.relu",
"optax.adam",
"elegy.Optimizer",
"elegy.metrics.SparseCategoricalAccuracy",
"elegy.nn.Linear",
"numpy.allclose",
"numpy.ones",
"optax.sgd",
"elegy.RNGSeq",
"optax.clip",
"jax.numpy.reshape",
"jax.numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.all",
"elegy.losses.SparseCategoricalCrossentropy"
] |
[((431, 463), 'jax.numpy.reshape', 'jnp.reshape', (['x', '[x.shape[0], -1]'], {}), '(x, [x.shape[0], -1])\n', (442, 463), True, 'import jax.numpy as jnp\n'), ((561, 575), 'jax.nn.relu', 'jax.nn.relu', (['x'], {}), '(x)\n', (572, 575), False, 'import jax\n'), ((629, 643), 'jax.nn.relu', 'jax.nn.relu', (['x'], {}), '(x)\n', (640, 643), False, 'import jax\n'), ((788, 805), 'optax.adam', 'optax.adam', (['(0.001)'], {}), '(0.001)\n', (798, 805), False, 'import optax\n'), ((877, 927), 'elegy.Optimizer', 'elegy.Optimizer', (['optax_op'], {'lr_schedule': 'lr_schedule'}), '(optax_op, lr_schedule=lr_schedule)\n', (892, 927), False, 'import elegy\n'), ((946, 971), 'numpy.random.uniform', 'np.random.uniform', (['(3, 4)'], {}), '((3, 4))\n', (963, 971), True, 'import numpy as np\n'), ((988, 1013), 'numpy.random.uniform', 'np.random.uniform', (['(3, 4)'], {}), '((3, 4))\n', (1005, 1013), True, 'import numpy as np\n'), ((1028, 1044), 'elegy.RNGSeq', 'elegy.RNGSeq', (['(42)'], {}), '(42)\n', (1040, 1044), False, 'import elegy\n'), ((1727, 1744), 'optax.adam', 'optax.adam', (['(0.001)'], {}), '(0.001)\n', (1737, 1744), False, 'import optax\n'), ((1813, 1882), 'elegy.Optimizer', 'elegy.Optimizer', (['optax_op'], {'lr_schedule': 'lr_schedule', 'steps_per_epoch': '(2)'}), '(optax_op, lr_schedule=lr_schedule, steps_per_epoch=2)\n', (1828, 1882), False, 'import elegy\n'), ((1923, 1948), 'numpy.random.uniform', 'np.random.uniform', (['(3, 4)'], {}), '((3, 4))\n', (1940, 1948), True, 'import numpy as np\n'), ((1965, 1990), 'numpy.random.uniform', 'np.random.uniform', (['(3, 4)'], {}), '((3, 4))\n', (1982, 1990), True, 'import numpy as np\n'), ((2005, 2021), 'elegy.RNGSeq', 'elegy.RNGSeq', (['(42)'], {}), '(42)\n', (2017, 2021), False, 'import elegy\n'), ((2932, 2954), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3, 4)'}), '(shape=(3, 4))\n', (2940, 2954), True, 'import numpy as np\n'), ((3017, 3033), 'elegy.RNGSeq', 'elegy.RNGSeq', (['(42)'], {}), '(42)\n', (3029, 3033), False, 'import elegy\n'), ((3749, 3782), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(5, 7, 7)'}), '(size=(5, 7, 7))\n', (3766, 3782), True, 'import numpy as np\n'), ((3795, 3827), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(5,)'}), '(10, size=(5,))\n', (3812, 3827), True, 'import numpy as np\n'), ((4121, 4162), 'numpy.allclose', 'np.allclose', (["history.history['lr']", '(0.001)'], {}), "(history.history['lr'], 0.001)\n", (4132, 4162), True, 'import numpy as np\n'), ((476, 500), 'elegy.nn.Linear', 'elegy.nn.Linear', (['self.n1'], {}), '(self.n1)\n', (491, 500), False, 'import elegy\n'), ((516, 545), 'elegy.nn.BatchNormalization', 'elegy.nn.BatchNormalization', ([], {}), '()\n', (543, 545), False, 'import elegy\n'), ((589, 613), 'elegy.nn.Linear', 'elegy.nn.Linear', (['self.n2'], {}), '(self.n2)\n', (604, 613), False, 'import elegy\n'), ((656, 675), 'elegy.nn.Linear', 'elegy.nn.Linear', (['(10)'], {}), '(10)\n', (671, 675), False, 'import elegy\n'), ((2859, 2873), 'optax.sgd', 'optax.sgd', (['(0.1)'], {}), '(0.1)\n', (2868, 2873), False, 'import optax\n'), ((2887, 2902), 'optax.clip', 'optax.clip', (['(0.5)'], {}), '(0.5)\n', (2897, 2902), False, 'import optax\n'), ((2971, 2992), 'numpy.ones', 'np.ones', ([], {'shape': '(3, 4)'}), '(shape=(3, 4))\n', (2978, 2992), True, 'import numpy as np\n'), ((3246, 3268), 'numpy.all', 'np.all', (['(-0.5 <= params)'], {}), '(-0.5 <= params)\n', (3252, 3268), True, 'import numpy as np\n'), ((3273, 3294), 'numpy.all', 'np.all', (['(params <= 0.5)'], {}), '(params <= 0.5)\n', 
(3279, 3294), True, 'import numpy as np\n'), ((3409, 3469), 'elegy.losses.SparseCategoricalCrossentropy', 'elegy.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (3451, 3469), False, 'import elegy\n'), ((3491, 3532), 'elegy.metrics.SparseCategoricalAccuracy', 'elegy.metrics.SparseCategoricalAccuracy', ([], {}), '()\n', (3530, 3532), False, 'import elegy\n'), ((3589, 3614), 'optax.adamw', 'optax.adamw', (['(1.0)'], {'b1': '(0.95)'}), '(1.0, b1=0.95)\n', (3600, 3614), False, 'import optax\n'), ((3664, 3680), 'jax.numpy.array', 'jnp.array', (['(0.001)'], {}), '(0.001)\n', (3673, 3680), True, 'import jax.numpy as jnp\n')]
|
# -- coding: UTF-8 --
"""
Spyder Editor
This is a temporary script file.
"""
from bs4 import BeautifulSoup
import sys
import os
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import urllib.parse,urllib.request,urllib.error
base="https://nazrul-rachanabali.nltr.org/"
page=urllib.request.urlopen(base).read()
soup=BeautifulSoup(page,'html5lib')
ba=soup.find_all("ul",{"class":["slidedoormenu"]})
#print(ba)
d=soup.div.ul.find_all('a')
#type(d[3])
article_page=(d[3]).get("href")
#soup.div.ul.li.a
newurl_2=base+article_page
page1=urllib.request.urlopen(newurl_2).read()
soup1=BeautifulSoup(page1,'html5lib')
e=soup1.find_all('a')
arr1=[]
arr4=[]
for link in e[1:9]:
f=link.get('href')
f=base+f
arr1.append(f)
arr4.append(link.get_text())
#for k in arr2:
for m in range(0,len(arr4)):
page1=urllib.request.urlopen(arr1[m]).read()
soup1=BeautifulSoup(page1,'html5lib')
x=soup1.find_all('div',id='data')
arr2=[];
arr3=[];
for i in x:
g=i.find_all('a')
for k in g[:-7]:
arr2.append(k.get('href'))
arr3.append(k.get_text())
for z in range(0,len(arr3)):
final_url=base+arr2[z]
#==============================================================================
# page1=urllib.request.urlopen(final_url).read()
# soup1=BeautifulSoup(page1,'html5lib')
# head = soup1.find_all("p",class_="head1")
# headd=head[0].get_text()
#==============================================================================
filenam = "D:\%s\%s"%(arr4[m],arr3[z])
if not os.path.exists(filenam):
os.makedirs(filenam)
for i in range(0,110):
if arr3[z].endswith(" "):
arr3[z]=arr3[z][:-1]
filename = "D:\%s\%s\%s_%d.txt"%(arr4[m],arr3[z],arr3[z],i)
fi = open(filename, "wb")
page1=urllib.request.urlopen(final_url).read()
soup1=BeautifulSoup(page1,'html5lib')
final_url=base+arr2[z]
h=soup1.find_all('div',id="data")
for j in h:
fi.write(j.text.encode("utf-8"))
s=j.text
            if not s.split():
                fi.close()
                break
            a,b=final_url.split('1&titleid=')
            final_url=a+str(i+1)+"&titleid="+b
            print('************'+final_url+'***********')
            fi.close()
|
[
"bs4.BeautifulSoup",
"os.path.exists",
"os.makedirs"
] |
[((347, 378), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page', '"""html5lib"""'], {}), "(page, 'html5lib')\n", (360, 378), False, 'from bs4 import BeautifulSoup\n'), ((611, 643), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page1', '"""html5lib"""'], {}), "(page1, 'html5lib')\n", (624, 643), False, 'from bs4 import BeautifulSoup\n'), ((895, 927), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page1', '"""html5lib"""'], {}), "(page1, 'html5lib')\n", (908, 927), False, 'from bs4 import BeautifulSoup\n'), ((1652, 1675), 'os.path.exists', 'os.path.exists', (['filenam'], {}), '(filenam)\n', (1666, 1675), False, 'import os\n'), ((1689, 1709), 'os.makedirs', 'os.makedirs', (['filenam'], {}), '(filenam)\n', (1700, 1709), False, 'import os\n'), ((2003, 2035), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page1', '"""html5lib"""'], {}), "(page1, 'html5lib')\n", (2016, 2035), False, 'from bs4 import BeautifulSoup\n')]
|
try:
from mitmproxy import controller, proxy
from mitmproxy.proxy.server import ProxyServer
except:
from libmproxy import controller, proxy
from libmproxy.proxy.server import ProxyServer
from plugins import *
from threading import Thread
from core.config.settings import SettingsINI
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class ThreadController(Thread):
def __init__(self,main ,parent=None):
super(ThreadController, self).__init__(parent)
self.main = main
def run(self):
try:
controller.Master.run(self.main)
except KeyboardInterrupt:
self.main.shutdown()
def stop(self):
self.main.shutdown()
class MasterHandler(controller.Master):
def __init__(self, server,session):
controller.Master.__init__(self, server)
self.config = SettingsINI('core/pumpkinProxy.ini')
self.session = session
self.plugins = []
self.initializePlugins()
def run(self):
self.thread = ThreadController(self)
self.thread.start()
def disablePlugin(self,name):
''' disable plugin by name '''
print('plugin:{} status:OFF'.format(name))
for plugin in self.plugins:
if plugin.name == name:
self.plugins.remove(plugin)
def initializePlugins(self):
plugin_classes = plugin.PluginTemplate.__subclasses__()
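        # Instantiate every PluginTemplate subclass that is enabled under the
        # 'plugins' section of core/pumpkinProxy.ini and keep it for the
        # request/response hooks below.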
for p in plugin_classes:
if self.config.get_setting('plugins',p.name,format=bool):
print('plugins::{0:20} status:On'.format(p.name))
self.plugins.append(p())
        # initialize logging in all enabled plugins
for instance in self.plugins:
instance.init_logger(self.session)
def handle_request(self, flow):
'''
print "-- request --"
print flow.__dict__
print flow.request.__dict__
print flow.request.headers.__dict__
print "--------------"
print
'''
for p in self.plugins:
p.request(flow)
flow.reply()
def handle_response(self, flow):
'''
print
print "-- response --"
print flow.__dict__
print flow.response.__dict__
print flow.response.headers.__dict__
print "--------------"
print
'''
for p in self.plugins:
p.response(flow)
#print flow.__dict__
flow.reply()
|
[
"libmproxy.controller.Master.__init__",
"core.config.settings.SettingsINI",
"libmproxy.controller.Master.run"
] |
[((1841, 1881), 'libmproxy.controller.Master.__init__', 'controller.Master.__init__', (['self', 'server'], {}), '(self, server)\n', (1867, 1881), False, 'from libmproxy import controller, proxy\n'), ((1905, 1941), 'core.config.settings.SettingsINI', 'SettingsINI', (['"""core/pumpkinProxy.ini"""'], {}), "('core/pumpkinProxy.ini')\n", (1916, 1941), False, 'from core.config.settings import SettingsINI\n'), ((1601, 1633), 'libmproxy.controller.Master.run', 'controller.Master.run', (['self.main'], {}), '(self.main)\n', (1622, 1633), False, 'from libmproxy import controller, proxy\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet2DKernel
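# Build a 2-D Ricker ("Mexican hat") wavelet kernel; the argument sets the
# kernel width in pixels.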
ricker_2d_kernel = RickerWavelet2DKernel(5)
plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
print(ricker_2d_kernel)
|
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"astropy.convolution.RickerWavelet2DKernel",
"matplotlib.pyplot.show"
] |
[((126, 150), 'astropy.convolution.RickerWavelet2DKernel', 'RickerWavelet2DKernel', (['(5)'], {}), '(5)\n', (147, 150), False, 'from astropy.convolution import RickerWavelet2DKernel\n'), ((151, 217), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ricker_2d_kernel'], {'interpolation': '"""none"""', 'origin': '"""lower"""'}), "(ricker_2d_kernel, interpolation='none', origin='lower')\n", (161, 217), True, 'import matplotlib.pyplot as plt\n'), ((218, 242), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x [pixels]"""'], {}), "('x [pixels]')\n", (228, 242), True, 'import matplotlib.pyplot as plt\n'), ((243, 267), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y [pixels]"""'], {}), "('y [pixels]')\n", (253, 267), True, 'import matplotlib.pyplot as plt\n'), ((268, 282), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (280, 282), True, 'import matplotlib.pyplot as plt\n'), ((283, 293), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (291, 293), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python3
import logging
import argparse
import traceback
import os
import sys
from analysis import Analysis
from collector import Collector
from config import DEBUG, DEFAULT_LOG_FILE_DIR
def is_dir(dirname):
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
def main():
if DEBUG:
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format=
'[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
datefmt="%H:%M:%S")
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=lambda x: is_dir(x))
parser.add_argument(
'--test_types',
nargs="+",
choices=['first_match', 'all_matches', 'consecutive_matches'])
parser.add_argument('--log_files', nargs='+', type=argparse.FileType())
parser.set_defaults(
test_types=['first_match', 'all_matches', 'consecutive_matches'])
args = parser.parse_args()
if args.log_files:
logging.info('starting analysis')
Analysis(files=args.log_files).analyze_logs()
logging.info('finished analysis')
else:
logging.info('starting collection')
Collector(args.task).collect()
logging.info('finished collection')
logging.info('starting analysis')
Analysis(logs_dir=DEFAULT_LOG_FILE_DIR).analyze_logs()
if __name__ == '__main__':
main()
|
[
"logging.basicConfig",
"argparse.FileType",
"collector.Collector",
"argparse.ArgumentParser",
"argparse.ArgumentTypeError",
"os.path.isdir",
"analysis.Analysis",
"logging.info"
] |
[((665, 690), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (688, 690), False, 'import argparse\n'), ((237, 259), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (250, 259), False, 'import os\n'), ((330, 361), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['msg'], {}), '(msg)\n', (356, 361), False, 'import argparse\n'), ((432, 597), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': '"""[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s"""', 'datefmt': '"""%H:%M:%S"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n datefmt='%H:%M:%S')\n", (451, 597), False, 'import logging\n'), ((1129, 1162), 'logging.info', 'logging.info', (['"""starting analysis"""'], {}), "('starting analysis')\n", (1141, 1162), False, 'import logging\n'), ((1227, 1260), 'logging.info', 'logging.info', (['"""finished analysis"""'], {}), "('finished analysis')\n", (1239, 1260), False, 'import logging\n'), ((1279, 1314), 'logging.info', 'logging.info', (['"""starting collection"""'], {}), "('starting collection')\n", (1291, 1314), False, 'import logging\n'), ((1364, 1399), 'logging.info', 'logging.info', (['"""finished collection"""'], {}), "('finished collection')\n", (1376, 1399), False, 'import logging\n'), ((1408, 1441), 'logging.info', 'logging.info', (['"""starting analysis"""'], {}), "('starting analysis')\n", (1420, 1441), False, 'import logging\n'), ((945, 964), 'argparse.FileType', 'argparse.FileType', ([], {}), '()\n', (962, 964), False, 'import argparse\n'), ((1172, 1202), 'analysis.Analysis', 'Analysis', ([], {'files': 'args.log_files'}), '(files=args.log_files)\n', (1180, 1202), False, 'from analysis import Analysis\n'), ((1324, 1344), 'collector.Collector', 'Collector', (['args.task'], {}), '(args.task)\n', (1333, 1344), False, 'from collector import Collector\n'), ((1451, 1490), 'analysis.Analysis', 'Analysis', ([], {'logs_dir': 'DEFAULT_LOG_FILE_DIR'}), '(logs_dir=DEFAULT_LOG_FILE_DIR)\n', (1459, 1490), False, 'from analysis import Analysis\n')]
|
#!/usr/bin/env python
# coding: utf-8
""" """
import typing as t
import attr
import click
@attr.s(frozen=True)
class Memory(object):
banks: t.Tuple[int, ...] = attr.ib()
def balance(self) -> 'Memory':
mem = list(self.banks)
num_banks = len(self.banks)
# Find the amount of blocks to balance - remove them from that bank.
blocks_to_balance = max(mem)
bank_pointer = mem.index(blocks_to_balance)
mem[bank_pointer] = 0
# Rebalance
balance_rounds = 0
while blocks_to_balance > 0:
# Advance the pointer.
bank_pointer = (bank_pointer + 1) % num_banks
mem[bank_pointer] += 1
blocks_to_balance -= 1
return Memory(
banks=tuple(mem)
)
def detect_loop(memory: Memory) -> t.Tuple[int, Memory]:
    """Find how many balance rounds pass until an arrangement repeats; returns the count and the repeated Memory."""
arrangements_seen = set()
balancer_rounds = 0
while memory not in arrangements_seen:
arrangements_seen.add(memory)
memory = memory.balance()
balancer_rounds += 1
return balancer_rounds, memory
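# A small usage sketch (added for illustration; `_example_detect_loop` is not
# part of the original script): with the four-bank layout (0, 2, 7, 0) the
# first repeated arrangement appears after 5 balance rounds, and the repeating
# cycle itself is 4 rounds long.
def _example_detect_loop() -> t.Tuple[int, int]:
    steps, looped = detect_loop(Memory(banks=(0, 2, 7, 0)))
    cycle, _ = detect_loop(looped)
    return steps, cycle  # expected: (5, 4)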
@click.group()
def balancer():
"""Balancing memory like they were spinning tops."""
pass
@balancer.command()
@click.argument('memory_banks', type=click.File())
def distribute(memory_banks):
banks = tuple(map(int, memory_banks.read().split()))
memory = Memory(banks)
steps, memory = detect_loop(memory)
msg = f"Loop found after {steps} balance rounds."
click.secho(msg, fg='green')
@balancer.command()
@click.argument('memory_banks', type=click.File())
def loop_size(memory_banks):
banks = tuple(map(int, memory_banks.read().split()))
memory = Memory(banks)
_, memory = detect_loop(memory)
loop_size, _ = detect_loop(memory)
msg = f"Loop size is {loop_size}."
click.secho(msg, fg='green')
def main():
"""Entrypoint."""
balancer()
if __name__ == '__main__':
main()
|
[
"attr.s",
"click.secho",
"click.group",
"click.File",
"attr.ib"
] |
[((94, 113), 'attr.s', 'attr.s', ([], {'frozen': '(True)'}), '(frozen=True)\n', (100, 113), False, 'import attr\n'), ((1122, 1135), 'click.group', 'click.group', ([], {}), '()\n', (1133, 1135), False, 'import click\n'), ((167, 176), 'attr.ib', 'attr.ib', ([], {}), '()\n', (174, 176), False, 'import attr\n'), ((1504, 1532), 'click.secho', 'click.secho', (['msg'], {'fg': '"""green"""'}), "(msg, fg='green')\n", (1515, 1532), False, 'import click\n'), ((1838, 1866), 'click.secho', 'click.secho', (['msg'], {'fg': '"""green"""'}), "(msg, fg='green')\n", (1849, 1866), False, 'import click\n'), ((1277, 1289), 'click.File', 'click.File', ([], {}), '()\n', (1287, 1289), False, 'import click\n'), ((1592, 1604), 'click.File', 'click.File', ([], {}), '()\n', (1602, 1604), False, 'import click\n')]
|
import glob
import os
import torch
from PIL import Image
from tqdm import tqdm
from ssd.config import cfg
from ssd.data.datasets import COCODataset, VOCDataset
from ssd.modeling.predictor import Predictor
from ssd.modeling.vgg_ssd import build_ssd_model
import argparse
import numpy as np
from ssd.utils.viz import draw_bounding_boxes
def run_demo(cfg, weights_file, iou_threshold, score_threshold, images_dir, output_dir, dataset_type):
if dataset_type == "voc":
class_names = VOCDataset.class_names
elif dataset_type == 'coco':
class_names = COCODataset.class_names
else:
raise NotImplementedError('Not implemented now.')
device = torch.device(cfg.MODEL.DEVICE)
model = build_ssd_model(cfg)
model.load(weights_file)
print('Loaded weights from {}.'.format(weights_file))
model = model.to(device)
predictor = Predictor(cfg=cfg,
model=model,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
device=device)
cpu_device = torch.device("cpu")
image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for image_path in tqdm(image_paths):
image = Image.open(image_path).convert("RGB")
image = np.array(image)
output = predictor.predict(image)
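        # The predictor returns boxes, labels and scores as tensors on the
        # inference device; move them to CPU NumPy arrays before drawing.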
boxes, labels, scores = [o.to(cpu_device).numpy() for o in output]
drawn_image = draw_bounding_boxes(image, boxes, labels, scores, class_names).astype(np.uint8)
image_name = os.path.basename(image_path)
Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
def main():
parser = argparse.ArgumentParser(description="SSD Demo.")
parser.add_argument(
"--config-file",
default="configs/ssd300_voc0712.yaml",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument("--weights", default='weights/ssd300_voc0712_mAP77.83.pth',type=str, help="Trained weights.")
parser.add_argument("--iou_threshold", type=float, default=0.5)
parser.add_argument("--score_threshold", type=float, default=0.5)
parser.add_argument("--images_dir", default='demo', type=str, help='Specify a image dir to do prediction.')
parser.add_argument("--output_dir", default='demo/result', type=str, help='Specify a image dir to save predicted images.')
parser.add_argument("--dataset_type", default="voc", type=str, help='Specify dataset type. Currently support voc and coco.')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
print(args)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
print("Loaded configuration file {}".format(args.config_file))
#with open(args.config_file, "r") as cf:
# config_str = "\n" + cf.read()
# print(config_str)
#print("Running with config:\n{}".format(cfg))
run_demo(cfg=cfg,
weights_file=args.weights,
iou_threshold=args.iou_threshold,
score_threshold=args.score_threshold,
images_dir=args.images_dir,
output_dir=args.output_dir,
dataset_type=args.dataset_type)
if __name__ == '__main__':
main()
|
[
"os.path.exists",
"PIL.Image.fromarray",
"PIL.Image.open",
"argparse.ArgumentParser",
"ssd.modeling.vgg_ssd.build_ssd_model",
"os.makedirs",
"tqdm.tqdm",
"ssd.config.cfg.freeze",
"os.path.join",
"ssd.modeling.predictor.Predictor",
"numpy.array",
"ssd.config.cfg.merge_from_file",
"ssd.config.cfg.merge_from_list",
"os.path.basename",
"ssd.utils.viz.draw_bounding_boxes",
"torch.device"
] |
[((678, 708), 'torch.device', 'torch.device', (['cfg.MODEL.DEVICE'], {}), '(cfg.MODEL.DEVICE)\n', (690, 708), False, 'import torch\n'), ((721, 741), 'ssd.modeling.vgg_ssd.build_ssd_model', 'build_ssd_model', (['cfg'], {}), '(cfg)\n', (736, 741), False, 'from ssd.modeling.vgg_ssd import build_ssd_model\n'), ((874, 986), 'ssd.modeling.predictor.Predictor', 'Predictor', ([], {'cfg': 'cfg', 'model': 'model', 'iou_threshold': 'iou_threshold', 'score_threshold': 'score_threshold', 'device': 'device'}), '(cfg=cfg, model=model, iou_threshold=iou_threshold,\n score_threshold=score_threshold, device=device)\n', (883, 986), False, 'from ssd.modeling.predictor import Predictor\n'), ((1104, 1123), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1116, 1123), False, 'import torch\n'), ((1283, 1300), 'tqdm.tqdm', 'tqdm', (['image_paths'], {}), '(image_paths)\n', (1287, 1300), False, 'from tqdm import tqdm\n'), ((1764, 1812), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SSD Demo."""'}), "(description='SSD Demo.')\n", (1787, 1812), False, 'import argparse\n'), ((2835, 2872), 'ssd.config.cfg.merge_from_file', 'cfg.merge_from_file', (['args.config_file'], {}), '(args.config_file)\n', (2854, 2872), False, 'from ssd.config import cfg\n'), ((2877, 2907), 'ssd.config.cfg.merge_from_list', 'cfg.merge_from_list', (['args.opts'], {}), '(args.opts)\n', (2896, 2907), False, 'from ssd.config import cfg\n'), ((2912, 2924), 'ssd.config.cfg.freeze', 'cfg.freeze', ([], {}), '()\n', (2922, 2924), False, 'from ssd.config import cfg\n'), ((1153, 1186), 'os.path.join', 'os.path.join', (['images_dir', '"""*.jpg"""'], {}), "(images_dir, '*.jpg')\n", (1165, 1186), False, 'import os\n'), ((1200, 1226), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1214, 1226), False, 'import os\n'), ((1236, 1259), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1247, 1259), False, 'import os\n'), ((1372, 1387), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1380, 1387), True, 'import numpy as np\n'), ((1628, 1656), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (1644, 1656), False, 'import os\n'), ((1699, 1735), 'os.path.join', 'os.path.join', (['output_dir', 'image_name'], {}), '(output_dir, image_name)\n', (1711, 1735), False, 'import os\n'), ((1318, 1340), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1328, 1340), False, 'from PIL import Image\n'), ((1527, 1589), 'ssd.utils.viz.draw_bounding_boxes', 'draw_bounding_boxes', (['image', 'boxes', 'labels', 'scores', 'class_names'], {}), '(image, boxes, labels, scores, class_names)\n', (1546, 1589), False, 'from ssd.utils.viz import draw_bounding_boxes\n'), ((1665, 1693), 'PIL.Image.fromarray', 'Image.fromarray', (['drawn_image'], {}), '(drawn_image)\n', (1680, 1693), False, 'from PIL import Image\n')]
|
from typing import Any, Dict, Tuple
import torch
from torch_geometric.nn import GATConv
from torch_sparse import SparseTensor, set_diag
from rgnn_at_scale.aggregation import ROBUST_MEANS
from rgnn_at_scale.models.gcn import GCN
class RGATConv(GATConv):
"""Extension of Pytorch Geometric's `GCNConv` to execute a robust aggregation function:
- soft_k_medoid
- soft_medoid (not scalable)
- k_medoid
- medoid (not scalable)
- dimmedian
Parameters
----------
mean : str, optional
The desired mean (see above for the options), by default 'soft_k_medoid'
mean_kwargs : Dict[str, Any], optional
Arguments for the mean, by default dict(k=64, temperature=1.0, with_weight_correction=True)
"""
def __init__(self, mean='soft_k_medoid',
mean_kwargs: Dict[str, Any] = dict(k=64, temperature=1.0, with_weight_correction=True),
**kwargs):
kwargs['in_channels'] = 2 * [kwargs['in_channels']]
super().__init__(**kwargs)
self._mean = ROBUST_MEANS[mean] if mean is not None else None
self._mean_kwargs = mean_kwargs
def forward(self, arguments: Tuple[torch.Tensor, SparseTensor] = None) -> torch.Tensor:
"""Predictions based on the input.
Parameters
----------
arguments : Sequence[torch.Tensor]
[x, edge indices] or [x, edge indices, edge weights], by default None
Returns
-------
torch.Tensor
            the output of `GATConv`.
Raises
------
NotImplementedError
if the arguments are not of length 2 or 3
"""
if len(arguments) == 2:
x, edge_index = arguments
edge_weight = None
elif len(arguments) == 3:
x, edge_index, edge_weight = arguments
else:
raise NotImplementedError("This method is just implemented for two or three arguments")
assert isinstance(edge_index, SparseTensor), 'GAT requires a SparseTensor as input'
assert edge_weight is None, 'The weights must be passed via a SparseTensor'
H, C = self.heads, self.out_channels
assert x.dim() == 2, 'Static graphs not supported in `GATConv`.'
x_l = x_r = self.lin_l(x).view(-1, H, C)
alpha_l = (x_l * self.att_l).sum(dim=-1)
alpha_r = (x_r * self.att_r).sum(dim=-1)
if self.add_self_loops:
edge_index = set_diag(edge_index)
# propagate_type: (x: OptPairTensor, alpha: OptPairTensor)
out = self.propagate(edge_index, x=(x_l, x_r),
alpha=(alpha_l, alpha_r))
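        # Re-weight the learned attention coefficients by the sparse adjacency
        # values so the robust mean below operates on a weighted attention matrix.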
alpha = self._alpha * edge_index.storage.value()[:, None]
self._alpha = None
if self.concat:
out = out.view(-1, self.heads * self.out_channels)
else:
out = out.mean(dim=1)
if self.bias is not None:
out += self.bias
attention_matrix = edge_index.set_value(alpha, layout='coo')
attention_matrix.storage._value = attention_matrix.storage._value.squeeze()
x = self.lin_l(x)
if self._mean is not None:
x = self._mean(attention_matrix, x, **self._mean_kwargs)
else:
x = attention_matrix @ x
x += self.bias
return x
class RGAT(GCN):
"""Generic Reliable Graph Neural Network (RGNN) implementation which currently supports a GCN architecture with the
aggregation functions:
- soft_k_medoid
- soft_medoid (not scalable)
- k_medoid
- medoid (not scalable)
- dimmedian
and with the adjacency preprocessings:
- SVD: <NAME>, <NAME>, <NAME>, and <NAME>. All you need is Low
(rank): Defending against adversarial attacks on graphs.
- GDC: <NAME>, <NAME>, and <NAME>. Diffusion Improves Graph Learning.
- Jaccard: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Adversarial examples
for graph data: Deep insights into attack and defense.
Parameters
----------
mean : str, optional
The desired mean (see above for the options), by default 'soft_k_medoid'
mean_kwargs : Dict[str, Any], optional
Arguments for the mean, by default dict(k=64, temperature=1.0, with_weight_correction=True)
"""
def __init__(self,
mean: str = 'soft_k_medoid',
mean_kwargs: Dict[str, Any] = dict(k=64, temperature=1.0, with_weight_correction=True),
**kwargs):
self._mean_kwargs = dict(mean_kwargs)
self._mean = mean
super().__init__(**kwargs)
assert not self.do_checkpoint, 'Checkpointing is not supported'
def _build_conv_layer(self, in_channels: int, out_channels: int):
return RGATConv(mean=self._mean, mean_kwargs=self._mean_kwargs,
in_channels=in_channels, out_channels=out_channels)
def _cache_if_option_is_set(self, callback, x, edge_idx, edge_weight):
return SparseTensor.from_edge_index(edge_idx, edge_weight, (x.shape[0], x.shape[0])), None
|
[
"torch_sparse.set_diag",
"torch_sparse.SparseTensor.from_edge_index"
] |
[((2451, 2471), 'torch_sparse.set_diag', 'set_diag', (['edge_index'], {}), '(edge_index)\n', (2459, 2471), False, 'from torch_sparse import SparseTensor, set_diag\n'), ((4975, 5052), 'torch_sparse.SparseTensor.from_edge_index', 'SparseTensor.from_edge_index', (['edge_idx', 'edge_weight', '(x.shape[0], x.shape[0])'], {}), '(edge_idx, edge_weight, (x.shape[0], x.shape[0]))\n', (5003, 5052), False, 'from torch_sparse import SparseTensor, set_diag\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw, msgprint
from frappe.utils import nowdate
from frappe.model.document import Document
import six
from six import string_types
class SMSSettings(Document):
pass
def validate_receiver_nos(receiver_list):
validated_receiver_list = []
for d in receiver_list:
if not d:
break
# remove invalid character
for x in [' ','-', '(', ')']:
d = d.replace(x, '')
validated_receiver_list.append(d)
if not validated_receiver_list:
throw(_("Please enter valid mobile nos"))
return validated_receiver_list
@frappe.whitelist()
def get_contact_number(contact_name, ref_doctype, ref_name):
"returns mobile number of the contact"
number = frappe.db.sql("""select mobile_no, phone from tabContact
where name=%s
and exists(
select name from `tabDynamic Link` where link_doctype=%s and link_name=%s
)
""", (contact_name, ref_doctype, ref_name))
return number and (number[0][0] or number[0][1]) or ''
@frappe.whitelist()
def send_sms(receiver_list, msg, sender_name = '', success_msg = True):
import json
if isinstance(receiver_list, string_types):
receiver_list = json.loads(receiver_list)
if not isinstance(receiver_list, list):
receiver_list = [receiver_list]
receiver_list = validate_receiver_nos(receiver_list)
arg = {
'receiver_list' : receiver_list,
'message' : frappe.safe_decode(msg).encode('utf-8'),
'success_msg' : success_msg
}
if frappe.db.get_value('SMS Settings', None, 'sms_gateway_url'):
send_via_gateway(arg)
else:
msgprint(_("Please Update SMS Settings"))
def send_via_gateway(arg):
ss = frappe.get_doc('SMS Settings', 'SMS Settings')
headers = get_headers(ss)
use_json = headers.get("Content-Type") == "application/json"
message = frappe.safe_decode(arg.get('message'))
args = {ss.message_parameter: message}
for d in ss.get("parameters"):
if not d.header:
args[d.parameter] = d.value
success_list = []
for d in arg.get('receiver_list'):
args[ss.receiver_parameter] = d
status = send_request(ss.sms_gateway_url, args, headers, ss.use_post, use_json)
if 200 <= status < 300:
success_list.append(d)
if len(success_list) > 0:
args.update(arg)
create_sms_log(args, success_list)
if arg.get('success_msg'):
frappe.msgprint(_("SMS sent to following numbers: {0}").format("\n" + "\n".join(success_list)))
def get_headers(sms_settings=None):
if not sms_settings:
sms_settings = frappe.get_doc('SMS Settings', 'SMS Settings')
headers={'Accept': "text/plain, text/html, */*"}
for d in sms_settings.get("parameters"):
if d.header == 1:
headers.update({d.parameter: d.value})
return headers
def send_request(gateway_url, params, headers=None, use_post=False, use_json=False):
import requests
if not headers:
headers = get_headers()
kwargs = {"headers": headers}
if use_json:
kwargs["json"] = params
elif use_post:
kwargs["data"] = params
else:
kwargs["params"] = params
if use_post:
response = requests.post(gateway_url, **kwargs)
else:
response = requests.get(gateway_url, **kwargs)
response.raise_for_status()
return response.status_code
# Create SMS Log
# =========================================================
def create_sms_log(args, sent_to):
sl = frappe.new_doc('SMS Log')
sl.sent_on = nowdate()
sl.message = args['message'].decode('utf-8')
sl.no_of_requested_sms = len(args['receiver_list'])
sl.requested_numbers = "\n".join(args['receiver_list'])
sl.no_of_sent_sms = len(sent_to)
sl.sent_to = "\n".join(sent_to)
sl.flags.ignore_permissions = True
sl.save()
|
[
"json.loads",
"requests.post",
"frappe.db.get_value",
"frappe.safe_decode",
"frappe.whitelist",
"frappe._",
"requests.get",
"frappe.utils.nowdate",
"frappe.get_doc",
"frappe.db.sql",
"frappe.new_doc"
] |
[((753, 771), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (769, 771), False, 'import frappe\n'), ((1161, 1179), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (1177, 1179), False, 'import frappe\n'), ((883, 1110), 'frappe.db.sql', 'frappe.db.sql', (['"""select mobile_no, phone from tabContact \n\t\twhere name=%s \n\t\t\tand exists(\n\t\t\t\tselect name from `tabDynamic Link` where link_doctype=%s and link_name=%s\n\t\t\t)\n\t"""', '(contact_name, ref_doctype, ref_name)'], {}), '(\n """select mobile_no, phone from tabContact \n\t\twhere name=%s \n\t\t\tand exists(\n\t\t\t\tselect name from `tabDynamic Link` where link_doctype=%s and link_name=%s\n\t\t\t)\n\t"""\n , (contact_name, ref_doctype, ref_name))\n', (896, 1110), False, 'import frappe\n'), ((1626, 1686), 'frappe.db.get_value', 'frappe.db.get_value', (['"""SMS Settings"""', 'None', '"""sms_gateway_url"""'], {}), "('SMS Settings', None, 'sms_gateway_url')\n", (1645, 1686), False, 'import frappe\n'), ((1797, 1843), 'frappe.get_doc', 'frappe.get_doc', (['"""SMS Settings"""', '"""SMS Settings"""'], {}), "('SMS Settings', 'SMS Settings')\n", (1811, 1843), False, 'import frappe\n'), ((3433, 3458), 'frappe.new_doc', 'frappe.new_doc', (['"""SMS Log"""'], {}), "('SMS Log')\n", (3447, 3458), False, 'import frappe\n'), ((3473, 3482), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (3480, 3482), False, 'from frappe.utils import nowdate\n'), ((1329, 1354), 'json.loads', 'json.loads', (['receiver_list'], {}), '(receiver_list)\n', (1339, 1354), False, 'import json\n'), ((2619, 2665), 'frappe.get_doc', 'frappe.get_doc', (['"""SMS Settings"""', '"""SMS Settings"""'], {}), "('SMS Settings', 'SMS Settings')\n", (2633, 2665), False, 'import frappe\n'), ((3162, 3198), 'requests.post', 'requests.post', (['gateway_url'], {}), '(gateway_url, **kwargs)\n', (3175, 3198), False, 'import requests\n'), ((3219, 3254), 'requests.get', 'requests.get', (['gateway_url'], {}), '(gateway_url, **kwargs)\n', (3231, 3254), False, 'import requests\n'), ((682, 716), 'frappe._', '_', (['"""Please enter valid mobile nos"""'], {}), "('Please enter valid mobile nos')\n", (683, 716), False, 'from frappe import _, throw, msgprint\n'), ((1730, 1761), 'frappe._', '_', (['"""Please Update SMS Settings"""'], {}), "('Please Update SMS Settings')\n", (1731, 1761), False, 'from frappe import _, throw, msgprint\n'), ((1547, 1570), 'frappe.safe_decode', 'frappe.safe_decode', (['msg'], {}), '(msg)\n', (1565, 1570), False, 'import frappe\n'), ((2463, 2502), 'frappe._', '_', (['"""SMS sent to following numbers: {0}"""'], {}), "('SMS sent to following numbers: {0}')\n", (2464, 2502), False, 'from frappe import _, throw, msgprint\n')]
|
# Generated by Django 2.1.5 on 2019-05-04 07:55
import blog.formatChecker
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0040_auto_20190504_0840'),
]
operations = [
migrations.AlterField(
model_name='videos',
name='video',
field=models.FileField(blank=True, null=True, upload_to='uploads/', validators=[blog.formatChecker.file_size]),
),
]
|
[
"django.db.models.FileField"
] |
[((358, 467), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""uploads/"""', 'validators': '[blog.formatChecker.file_size]'}), "(blank=True, null=True, upload_to='uploads/', validators=[\n blog.formatChecker.file_size])\n", (374, 467), False, 'from django.db import migrations, models\n')]
|
import lkml
from time import time_ns
from rich import print
FILE_PATH = "/Users/ladvien/rusty_looker/src/resources/test.lkml"
with open(FILE_PATH, "r") as f:
lookml = f.read()
startTime = time_ns() // 1_000_000
result = lkml.load(lookml)
print(result)
executionTime = (time_ns() // 1_000_000) - startTime
print('Execution time in milliseconds: ' + str(executionTime))
|
[
"time.time_ns",
"rich.print",
"lkml.load"
] |
[((228, 245), 'lkml.load', 'lkml.load', (['lookml'], {}), '(lookml)\n', (237, 245), False, 'import lkml\n'), ((246, 259), 'rich.print', 'print', (['result'], {}), '(result)\n', (251, 259), False, 'from rich import print\n'), ((194, 203), 'time.time_ns', 'time_ns', ([], {}), '()\n', (201, 203), False, 'from time import time_ns\n'), ((277, 286), 'time.time_ns', 'time_ns', ([], {}), '()\n', (284, 286), False, 'from time import time_ns\n')]
|
from django.db import models
from django.db.models.signals import pre_save, post_save
from core.utils.constants import Constants
from core.utils.data_convertion import DataConversion
class ExcelFile(models.Model):
file_name = models.FileField(upload_to='uploads')
date_created = models.DateTimeField(auto_now_add=True)
activated = models.BooleanField(default=False)
def __str__(self):
return f'File Id{self.id} File name {self.file_name}'
class CsvFile(models.Model):
file_name = models.FileField(upload_to='uploads')
date_uploaded = models.DateTimeField(auto_now_add=True)
activated = models.BooleanField(default=False)
def __str__(self):
return f'File Id{self.id} File name {self.file_name}'
class ViralLoad(models.Model):
laboratory_id = models.CharField(max_length=100, null=True, blank=True)
sector = models.CharField(max_length=30, blank=True, null=True)
number_orig_lab = models.CharField(max_length=100, blank=True, null=True)
province = models.CharField(max_length=100, blank=True, null=True)
district = models.CharField(max_length=100, blank=True, null=True)
health_facility = models.CharField(max_length=100, blank=True, null=True)
patient_name = models.CharField(max_length=100, blank=True, null=True)
gender = models.CharField(max_length=100, blank=True, null=True)
reference = models.CharField(max_length=100, blank=True, null=True)
capture_date = models.DateField(null=True, blank=True)
access_date = models.DateField(null=True, blank=True)
nid = models.CharField(max_length=100, blank=True, null=True)
viral_load = models.CharField(max_length=100, null=True, blank=True)
viral_load_qualitative = models.CharField(
max_length=100, blank=True, null=True)
synced = models.BooleanField(default=False)
formatted_nid = models.CharField(max_length=100, blank=True, null=True)
class Meta:
verbose_name = 'Viral Load'
verbose_name_plural = 'Viral Loads'
def __str__(self):
return self.patient_name
class Patient(models.Model):
patient_uuid = models.CharField(max_length=500)
#person_id = models.IntegerField()
nid = models.CharField(max_length=100, blank=True, null=True)
patient_name = models.CharField(max_length=100, blank=True, null=True)
def __str__(self):
return self.patient_name
class Encounter(models.Model):
encounterDatetime = models.DateTimeField(auto_now_add=True)
patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
encounterType_uuid = models.CharField(
max_length=255, default=Constants().get_uuids().get('encounter_type'))
location_uuid = models.CharField(
max_length=255, default=Constants().get_uuids().get('hpt'))
form_uuid = models.CharField(
max_length=255, default=Constants().get_uuids().get('form'))
synced = models.BooleanField(default=False)
def __str__(self):
        return self.patient.patient_name
class Observation(models.Model):
patient = models.ForeignKey(
Patient, on_delete=models.CASCADE)
obsDateTime = models.DateTimeField(auto_now_add=True)
concept = models.CharField(max_length=255)
value_numeric = models.PositiveIntegerField(null=True, blank=True)
value_coded = models.PositiveIntegerField(null=True, blank=True)
value_datetime = models.DateTimeField(null=True, blank=True)
encounter = models.ForeignKey(Encounter, on_delete=models.CASCADE)
location = models.CharField(
max_length=255, default=Constants().get_uuids().get('hpt'))
value = models.CharField(max_length=255)
voided = models.BooleanField(default=False)
synced = models.BooleanField(default=False)
def __str__(self):
        return str(self.id)
# def insert_formatted_nid(sender, instance, created, *args, **kwargs):
# if created:
# instance.formatted_nid = DataConversion.format_nid(instance.nid)
# print(instance.formatted_nid)
# post_save.connect(insert_formatted_nid, sender=ViralLoad)
|
[
"django.db.models.DateField",
"django.db.models.ForeignKey",
"django.db.models.FileField",
"django.db.models.BooleanField",
"django.db.models.PositiveIntegerField",
"core.utils.constants.Constants",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] |
[((233, 270), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""uploads"""'}), "(upload_to='uploads')\n", (249, 270), False, 'from django.db import models\n'), ((290, 329), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (310, 329), False, 'from django.db import models\n'), ((346, 380), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (365, 380), False, 'from django.db import models\n'), ((514, 551), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""uploads"""'}), "(upload_to='uploads')\n", (530, 551), False, 'from django.db import models\n'), ((572, 611), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (592, 611), False, 'from django.db import models\n'), ((628, 662), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (647, 662), False, 'from django.db import models\n'), ((802, 857), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (818, 857), False, 'from django.db import models\n'), ((871, 925), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'blank': '(True)', 'null': '(True)'}), '(max_length=30, blank=True, null=True)\n', (887, 925), False, 'from django.db import models\n'), ((948, 1003), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (964, 1003), False, 'from django.db import models\n'), ((1019, 1074), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1035, 1074), False, 'from django.db import models\n'), ((1090, 1145), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1106, 1145), False, 'from django.db import models\n'), ((1168, 1223), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1184, 1223), False, 'from django.db import models\n'), ((1243, 1298), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1259, 1298), False, 'from django.db import models\n'), ((1312, 1367), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1328, 1367), False, 'from django.db import models\n'), ((1384, 1439), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1400, 1439), False, 'from django.db import models\n'), ((1459, 1498), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1475, 1498), False, 'from django.db import models\n'), ((1517, 1556), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1533, 1556), False, 'from django.db import 
models\n'), ((1567, 1622), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1583, 1622), False, 'from django.db import models\n'), ((1640, 1695), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (1656, 1695), False, 'from django.db import models\n'), ((1725, 1780), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1741, 1780), False, 'from django.db import models\n'), ((1803, 1837), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1822, 1837), False, 'from django.db import models\n'), ((1858, 1913), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1874, 1913), False, 'from django.db import models\n'), ((2118, 2150), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (2134, 2150), False, 'from django.db import models\n'), ((2200, 2255), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (2216, 2255), False, 'from django.db import models\n'), ((2275, 2330), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (2291, 2330), False, 'from django.db import models\n'), ((2445, 2484), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2465, 2484), False, 'from django.db import models\n'), ((2499, 2551), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Patient'], {'on_delete': 'models.CASCADE'}), '(Patient, on_delete=models.CASCADE)\n', (2516, 2551), False, 'from django.db import models\n'), ((2896, 2930), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2915, 2930), False, 'from django.db import models\n'), ((3037, 3089), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Patient'], {'on_delete': 'models.CASCADE'}), '(Patient, on_delete=models.CASCADE)\n', (3054, 3089), False, 'from django.db import models\n'), ((3117, 3156), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3137, 3156), False, 'from django.db import models\n'), ((3171, 3203), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (3187, 3203), False, 'from django.db import models\n'), ((3224, 3274), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3251, 3274), False, 'from django.db import models\n'), ((3293, 3343), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3320, 3343), False, 'from django.db import models\n'), ((3365, 3408), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3385, 3408), False, 'from django.db import 
models\n'), ((3425, 3479), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Encounter'], {'on_delete': 'models.CASCADE'}), '(Encounter, on_delete=models.CASCADE)\n', (3442, 3479), False, 'from django.db import models\n'), ((3593, 3625), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (3609, 3625), False, 'from django.db import models\n'), ((3639, 3673), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3658, 3673), False, 'from django.db import models\n'), ((3687, 3721), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3706, 3721), False, 'from django.db import models\n'), ((2627, 2638), 'core.utils.constants.Constants', 'Constants', ([], {}), '()\n', (2636, 2638), False, 'from core.utils.constants import Constants\n'), ((2744, 2755), 'core.utils.constants.Constants', 'Constants', ([], {}), '()\n', (2753, 2755), False, 'from core.utils.constants import Constants\n'), ((2846, 2857), 'core.utils.constants.Constants', 'Constants', ([], {}), '()\n', (2855, 2857), False, 'from core.utils.constants import Constants\n'), ((3545, 3556), 'core.utils.constants.Constants', 'Constants', ([], {}), '()\n', (3554, 3556), False, 'from core.utils.constants import Constants\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com <EMAIL>
import re
from urllib import quote
class Url(object):
unsafe_or_hash = r'(?:(?:(?P<unsafe>unsafe)|(?P<hash>.+?))/)?'
debug = '(?:(?P<debug>debug)/)?'
meta = '(?:(?P<meta>meta)/)?'
trim = '(?:(?P<trim>trim(?::(?:top-left|bottom-right))?(?::\d+)?)/)?'
crop = '(?:(?P<crop_left>\d+)x(?P<crop_top>\d+):(?P<crop_right>\d+)x(?P<crop_bottom>\d+)/)?'
fit_in = '(?:(?P<adaptive>adaptive-)?(?P<full>full-)?(?P<fit_in>fit-in)/)?'
dimensions = '(?:(?P<horizontal_flip>-)?(?P<width>(?:\d+|orig))?x(?P<vertical_flip>-)?(?P<height>(?:\d+|orig))?/)?'
halign = r'(?:(?P<halign>left|right|center)/)?'
valign = r'(?:(?P<valign>top|bottom|middle)/)?'
smart = r'(?:(?P<smart>smart)/)?'
filters = r'(?:filters:(?P<filters>.+?\))/)?'
image = r'(?P<image>.+)'
compiled_regex = None
@classmethod
def regex(cls, has_unsafe_or_hash=True):
reg = ['/?']
if has_unsafe_or_hash:
reg.append(cls.unsafe_or_hash)
reg.append(cls.debug)
reg.append(cls.meta)
reg.append(cls.trim)
reg.append(cls.crop)
reg.append(cls.fit_in)
reg.append(cls.dimensions)
reg.append(cls.halign)
reg.append(cls.valign)
reg.append(cls.smart)
reg.append(cls.filters)
reg.append(cls.image)
return ''.join(reg)
@classmethod
def parse_decrypted(cls, url):
if cls.compiled_regex:
reg = cls.compiled_regex
else:
reg = cls.compiled_regex = re.compile(cls.regex(has_unsafe_or_hash=False))
result = reg.match(url)
if not result:
return None
result = result.groupdict()
int_or_0 = lambda value: 0 if value is None else int(value)
values = {
'debug': result['debug'] == 'debug',
'meta': result['meta'] == 'meta',
'trim': result['trim'],
'crop': {
'left': int_or_0(result['crop_left']),
'top': int_or_0(result['crop_top']),
'right': int_or_0(result['crop_right']),
'bottom': int_or_0(result['crop_bottom'])
},
'adaptive': result['adaptive'] == 'adaptive',
'full': result['full'] == 'full',
'fit_in': result['fit_in'] == 'fit-in',
'width': result['width'] == 'orig' and 'orig' or int_or_0(result['width']),
'height': result['height'] == 'orig' and 'orig' or int_or_0(result['height']),
'horizontal_flip': result['horizontal_flip'] == '-',
'vertical_flip': result['vertical_flip'] == '-',
'halign': result['halign'] or 'center',
'valign': result['valign'] or 'middle',
'smart': result['smart'] == 'smart',
'filters': result['filters'] or '',
'image': 'image' in result and result['image'] or None
}
return values
@classmethod # NOQA
def generate_options(cls,
debug=False,
width=0,
height=0,
smart=False,
meta=False,
trim=None,
adaptive=False,
full=False,
fit_in=False,
horizontal_flip=False,
vertical_flip=False,
halign='center',
valign='middle',
crop_left=None,
crop_top=None,
crop_right=None,
crop_bottom=None,
filters=None):
url = []
if debug:
url.append('debug')
if meta:
url.append('meta')
if trim:
if isinstance(trim, bool):
url.append('trim')
else:
url.append('trim:%s' % trim)
crop = crop_left or crop_top or crop_right or crop_bottom
if crop:
url.append('%sx%s:%sx%s' % (
crop_left,
crop_top,
crop_right,
crop_bottom
))
if fit_in:
fit_ops = []
if adaptive:
fit_ops.append('adaptive')
if full:
fit_ops.append('full')
fit_ops.append('fit-in')
url.append('-'.join(fit_ops))
if horizontal_flip:
width = '-%s' % width
if vertical_flip:
height = '-%s' % height
if width or height:
url.append('%sx%s' % (width, height))
if halign != 'center':
url.append(halign)
if valign != 'middle':
url.append(valign)
if smart:
url.append('smart')
if filters:
url.append('filters:%s' % filters)
return '/'.join(url)
@classmethod
def encode_url(kls, url):
return quote(url, '/:?%=&()~",\'$')
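# A small usage sketch (added for illustration; `_example_url_roundtrip` is not
# part of the original module): generate_options builds the option segment of a
# thumbor URL, and parse_decrypted recovers the same values from a full path.
def _example_url_roundtrip():
    options = Url.generate_options(width=300, height=200, smart=True)  # '300x200/smart'
    parsed = Url.parse_decrypted('%s/some/image.jpg' % options)
    return parsed['width'], parsed['height'], parsed['smart']  # (300, 200, True)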
|
[
"urllib.quote"
] |
[((5245, 5273), 'urllib.quote', 'quote', (['url', '"""/:?%=&()~",\'$"""'], {}), '(url, \'/:?%=&()~",\\\'$\')\n', (5250, 5273), False, 'from urllib import quote\n')]
|
import constants as c
from deck import Deck
from player import Human, RandomAI
class Game:
def __init__(self):
self.deck = None
self.players = None
self.scores = None
self.rounds_left = None
self.game_over = False
def new(self):
self.game_over = False
self.rounds_left = c.ROUNDS
self.players = [Human(), RandomAI()]
self.scores = {str(k): 0 for k in self.players}
self.new_round()
def new_round(self):
self.deck = Deck()
self.deck.shuffle()
for player in self.players:
player.hand = []
self.deal(player=player, quantity=c.INITIAL_HAND)
def deal(self, player, quantity=1):
for card in self.deck.draw(quantity):
player.hand.append(card)
def turn(self, player):
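        # One action for the player: returns None if they hit and stayed at or
        # under the limit (their turn continues), 0 if the hit busted them, or
        # their final score if they chose to stay.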
score = None
action = player.play()
if action == 'hit':
self.deal(player)
if player.get_score() > c.POINTS:
score = 0
elif action == 'stay':
score = player.get_score()
return score
def balance(self, scores):
print('----- Scores -----')
print(f'Round scores (points made in round): {scores}')
tie = True
winner = scores.popitem()
for k, v in scores.items():
if v > winner[1]:
winner = (k, v)
tie = False
elif v < winner[1]:
tie = False
if not tie:
self.scores[winner[0]] += 1
print(f'General scores (rounds won by each): {self.scores}')
def run(self):
# while there are still rounds left
while self.rounds_left:
# set round scores to empty
scores = {}
# for each player, do a whole turn, which can involve
# multiple actions, i.e., two or more "hits"
for player in self.players:
print(f'---- {str(player)} turn ----')
# turn is not over until we receive a score,
# whether it's 0, which means it overstepped
# or 0 < x <= 21
turn_over = False
while not turn_over:
                    # Do a turn until we get a score. If we don't
                    # have a score, that means that the engine
                    # "hit" and didn't overstep, so it's still
                    # its turn.
score = self.turn(player)
if score is not None:
print(f'Hand: {[str(e) for e in player.hand]}, points: {player.get_score()}')
# store scores for this player in this round
# and hand control over
scores[str(player)] = score
turn_over = True
# do a balance after finishing round
self.balance(scores)
# begin new round and reduce rounds left by 1
self.new_round()
self.rounds_left -= 1
print(f'Rounds left: {self.rounds_left}')
if __name__ == '__main__':
g = Game()
g.new()
g.run()
|
[
"deck.Deck",
"player.RandomAI",
"player.Human"
] |
[((540, 546), 'deck.Deck', 'Deck', ([], {}), '()\n', (544, 546), False, 'from deck import Deck\n'), ((387, 394), 'player.Human', 'Human', ([], {}), '()\n', (392, 394), False, 'from player import Human, RandomAI\n'), ((396, 406), 'player.RandomAI', 'RandomAI', ([], {}), '()\n', (404, 406), False, 'from player import Human, RandomAI\n')]
|
"""
Clustar module for fitting-related methods.
This module is designed for the 'ClustarData' object. All listed methods take
an input parameter of a 'ClustarData' object and return a 'ClustarData' object
after processing the method. As a result, all changes are localized within the
'ClustarData' object.
Visit <https://clustar.github.io/> for additional information.
"""
from clustar import graph
from scipy import ndimage, stats
from shapely import affinity, geometry
import numpy as np
def compute_fit(cd):
"""
Computes the normalized bivariate gaussian fit for the 'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
i = 0
while i < len(cd.groups):
group = cd.groups[i]
try:
rv = stats.multivariate_normal([group.stats.x_bar,
group.stats.y_bar],
group.stats.covariance_matrix)
except ValueError:
del cd.groups[i]
continue
bvg = rv.pdf(group.image.pos)
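        # Scale the Gaussian surface to the peak of the observed data, then
        # store the relative residual 1 - model/data for later evaluation.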
bvg *= np.max(group.image.data) / np.max(bvg)
group.res.data = 1 - (bvg / group.image.data)
group.fit.bvg = bvg
group.fit.rv = rv
i += 1
return cd
def compute_ellipse(cd):
"""
Computes the ellipse parameters and localized residuals for the 'Group'
objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
a = group.stats.x_len / 2
b = group.stats.y_len / 2
theta = np.linspace(0, np.pi * 2, 360)
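        # Polar form of the ellipse boundary:
        # r = a*b / sqrt((b*cos(theta))^2 + (a*sin(theta))^2).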
r = a * b / np.sqrt((b * np.cos(theta)) ** 2 +
(a * np.sin(theta)) ** 2)
xy = np.stack([group.stats.x_bar + r * np.cos(theta),
group.stats.y_bar + r * np.sin(theta)], 1)
ellipse = affinity.rotate(geometry.Polygon(xy),
group.stats.degrees,
(group.stats.x_bar, group.stats.y_bar))
pos = np.array([[i, j] for i in range(group.image.data.shape[0])
for j in range(group.image.data.shape[1])])
inside = np.array([p for p in pos
if ellipse.contains(geometry.Point(p))])
outside = np.array([p for p in pos
if not ellipse.contains(geometry.Point(p))])
group.fit.ellipse = ellipse
group.res.pos = pos
group.res.inside = inside
group.res.outside = outside
return cd
def compute_metrics(cd):
"""
Computes the evaluation metrics for the 'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
res = group.res
output = np.abs(res.data[res.inside[:, 0], res.inside[:, 1]])
output[output < 0] = 0
output[output > 1] = 1
bias = group.image.data[res.inside[:, 0], res.inside[:, 1]]
group.metrics.standard_deviation = np.std(output)
group.metrics.variance = group.metrics.standard_deviation ** 2
group.metrics.average = np.mean(output)
group.metrics.weighted_average = np.average(output, weights=bias)
group.res.output = output
return cd
def compute_peaks(cd):
"""
Computes the number of peaks along the major and minor axes for the
'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
res = np.array(group.res.data, copy=True)
res_out = group.res.outside
res[res_out[:, 0], res_out[:, 1]] = 0
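        # Rotate the masked residuals so the group's major axis (and, with the
        # extra 90 degrees, the minor axis) lies along the image x-axis before
        # scanning for peaks.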
r_major = np.abs(ndimage.rotate(res, group.stats.degrees))
r_minor = np.abs(ndimage.rotate(res, group.stats.degrees + 90))
major_idx = graph.critical_points(r_major)
minor_idx = graph.critical_points(r_minor)
major_idx = [major_idx[i] for i in range(len(major_idx))
if i % 2 == 0]
minor_idx = [minor_idx[i] for i in range(len(minor_idx))
if i % 2 == 0]
group.fit.major_peaks = len(major_idx)
group.fit.minor_peaks = len(minor_idx)
group.res.clean = res
return cd
def validate(cd):
"""
Determines which 'Group' objects are flagged for manual review by using
the specified validation parameters.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
attribute = cd.params.metric.lower()
threshold = cd.params.threshold
for group in cd.groups:
metric = getattr(group.metrics, attribute)
if metric > threshold:
group.flag = True
cd.flag = True
if cd.params.evaluate_peaks and \
((group.fit.major_peaks in [2, 4]) or
(group.fit.minor_peaks in [2, 4])):
group.flag = False
cd.flag = False
return cd
|
[
"numpy.abs",
"numpy.mean",
"numpy.average",
"scipy.stats.multivariate_normal",
"numpy.max",
"shapely.geometry.Point",
"numpy.array",
"numpy.linspace",
"shapely.geometry.Polygon",
"numpy.cos",
"numpy.std",
"numpy.sin",
"scipy.ndimage.rotate",
"clustar.graph.critical_points"
] |
[((1775, 1805), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * 2)', '(360)'], {}), '(0, np.pi * 2, 360)\n', (1786, 1805), True, 'import numpy as np\n'), ((3074, 3126), 'numpy.abs', 'np.abs', (['res.data[res.inside[:, 0], res.inside[:, 1]]'], {}), '(res.data[res.inside[:, 0], res.inside[:, 1]])\n', (3080, 3126), True, 'import numpy as np\n'), ((3301, 3315), 'numpy.std', 'np.std', (['output'], {}), '(output)\n', (3307, 3315), True, 'import numpy as np\n'), ((3419, 3434), 'numpy.mean', 'np.mean', (['output'], {}), '(output)\n', (3426, 3434), True, 'import numpy as np\n'), ((3476, 3508), 'numpy.average', 'np.average', (['output'], {'weights': 'bias'}), '(output, weights=bias)\n', (3486, 3508), True, 'import numpy as np\n'), ((3888, 3923), 'numpy.array', 'np.array', (['group.res.data'], {'copy': '(True)'}), '(group.res.data, copy=True)\n', (3896, 3923), True, 'import numpy as np\n'), ((4175, 4205), 'clustar.graph.critical_points', 'graph.critical_points', (['r_major'], {}), '(r_major)\n', (4196, 4205), False, 'from clustar import graph\n'), ((4226, 4256), 'clustar.graph.critical_points', 'graph.critical_points', (['r_minor'], {}), '(r_minor)\n', (4247, 4256), False, 'from clustar import graph\n'), ((864, 965), 'scipy.stats.multivariate_normal', 'stats.multivariate_normal', (['[group.stats.x_bar, group.stats.y_bar]', 'group.stats.covariance_matrix'], {}), '([group.stats.x_bar, group.stats.y_bar], group.\n stats.covariance_matrix)\n', (889, 965), False, 'from scipy import ndimage, stats\n'), ((1191, 1215), 'numpy.max', 'np.max', (['group.image.data'], {}), '(group.image.data)\n', (1197, 1215), True, 'import numpy as np\n'), ((1218, 1229), 'numpy.max', 'np.max', (['bvg'], {}), '(bvg)\n', (1224, 1229), True, 'import numpy as np\n'), ((2079, 2099), 'shapely.geometry.Polygon', 'geometry.Polygon', (['xy'], {}), '(xy)\n', (2095, 2099), False, 'from shapely import affinity, geometry\n'), ((4040, 4080), 'scipy.ndimage.rotate', 'ndimage.rotate', (['res', 'group.stats.degrees'], {}), '(res, group.stats.degrees)\n', (4054, 4080), False, 'from scipy import ndimage, stats\n'), ((4107, 4152), 'scipy.ndimage.rotate', 'ndimage.rotate', (['res', '(group.stats.degrees + 90)'], {}), '(res, group.stats.degrees + 90)\n', (4121, 4152), False, 'from scipy import ndimage, stats\n'), ((1963, 1976), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1969, 1976), True, 'import numpy as np\n'), ((2025, 2038), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2031, 2038), True, 'import numpy as np\n'), ((2461, 2478), 'shapely.geometry.Point', 'geometry.Point', (['p'], {}), '(p)\n', (2475, 2478), False, 'from shapely import affinity, geometry\n'), ((1840, 1853), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1846, 1853), True, 'import numpy as np\n'), ((1895, 1908), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1901, 1908), True, 'import numpy as np\n'), ((2577, 2594), 'shapely.geometry.Point', 'geometry.Point', (['p'], {}), '(p)\n', (2591, 2594), False, 'from shapely import affinity, geometry\n')]
|
import unittest
from datetime import datetime, timezone
from typing import List
from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI
from chillow.model.action import Action
from chillow.model.cell import Cell
from chillow.model.direction import Direction
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.service.game_service import GameService
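# Unit tests for NotKillingItselfAI: each test lays out a small 5x5 grid of Cells,
# wraps it in a Game/GameService, and checks which Actions the AI reports as surviving
# or as maximizing the distance to already visited cells.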
class NotKillingItselfAITest(unittest.TestCase):
def test_ai_should_choose_the_own_non_killing_itself_action(self):
player1 = Player(1, 0, 0, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell([player1]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself(self):
player1 = Player(1, 0, 1, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.change_nothing in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself2(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell([player2]), Cell(), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_left in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself_in_turn_6(self):
player1 = Player(1, 0, 4, Direction.up, 3, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
game_service.turn.turn_ctr = 6
sut = NotKillingItselfAI(player1, [], 4, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 1)
self.assertTrue(Action.slow_down in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(Action.speed_up in actions)
self.assertTrue(len(actions) == 3)
def test_ai_should_not_choose_speed_up_if_max_speed_is_allready_reached(self):
MAX_SPEED = 3
player1 = Player(1, 0, 4, Direction.up, MAX_SPEED, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], MAX_SPEED, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 1)
self.assertTrue(Action.slow_down in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_calc_action_with_max_distance(self):
player1 = Player(1, 0, 4, Direction.up, 1, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.calc_action_with_max_distance_to_visited_cells(game_service, [Action.speed_up,
Action.change_nothing,
Action.turn_right])
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_calc_all_action_with_max_distance_with_max_worse_distance(self):
MAX_WORSE_DISTANCE = 1
player1 = Player(1, 0, 4, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, MAX_WORSE_DISTANCE, 3)
actions: List[Action] = sut.calc_action_with_max_distance_to_visited_cells(game_service, [Action.speed_up,
Action.change_nothing,
Action.turn_right])
self.assertTrue(Action.speed_up in actions)
self.assertTrue(Action.change_nothing in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 3)
def test_get_information(self):
player = Player(1, 0, 4, Direction.up, 1, True, "")
sut = NotKillingItselfAI(player, [], 3, 1, 3)
expected = "max_speed=3, max_worse_distance=1, depth=3"
result = sut.get_information()
self.assertEqual(expected, result)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself_with_depth_greater_than_one(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 2)
actions: List[Action] = sut.find_surviving_actions(game_service, 2)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_empty_list_with_depth_greater_than_one_and_no_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell([player2]), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 2)
actions: List[Action] = sut.find_surviving_actions(game_service, 2)
self.assertTrue(len(actions) == 0)
def test_ai_should_choose_correct_list_with_depth_three_and_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_empty_list_with_depth_three_and_no_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(len(actions) == 0)
def test_ai_should_choose_best_list_of_actions_by_depth_from_lower_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_best_list_of_actions_by_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_best_list_of_actions_in_lowest_possible_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell([player2]), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_left in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
|
[
"datetime.datetime",
"chillow.model.game.Game",
"chillow.model.cell.Cell",
"chillow.service.game_service.GameService",
"chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI",
"chillow.model.player.Player"
] |
[((545, 587), 'chillow.model.player.Player', 'Player', (['(1)', '(0)', '(0)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 0, 0, Direction.up, 1, True, '')\n", (551, 587), False, 'from chillow.model.player import Player\n'), ((606, 650), 'chillow.model.player.Player', 'Player', (['(2)', '(4)', '(4)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 4, 4, Direction.down, 3, True, '')\n", (612, 650), False, 'from chillow.model.player import Player\n'), ((1017, 1066), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (1025, 1066), False, 'from datetime import datetime, timezone\n'), ((1082, 1123), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (1086, 1123), False, 'from chillow.model.game import Game\n'), ((1147, 1164), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (1158, 1164), False, 'from chillow.service.game_service import GameService\n'), ((1179, 1219), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(3)'], {}), '(player1, [], 3, 0, 3)\n', (1197, 1219), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((1498, 1540), 'chillow.model.player.Player', 'Player', (['(1)', '(0)', '(1)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 0, 1, Direction.up, 1, True, '')\n", (1504, 1540), False, 'from chillow.model.player import Player\n'), ((1559, 1603), 'chillow.model.player.Player', 'Player', (['(2)', '(4)', '(4)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 4, 4, Direction.down, 3, True, '')\n", (1565, 1603), False, 'from chillow.model.player import Player\n'), ((1970, 2019), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (1978, 2019), False, 'from datetime import datetime, timezone\n'), ((2035, 2076), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (2039, 2076), False, 'from chillow.model.game import Game\n'), ((2100, 2117), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (2111, 2117), False, 'from chillow.service.game_service import GameService\n'), ((2132, 2172), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(3)'], {}), '(player1, [], 3, 0, 3)\n', (2150, 2172), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((2510, 2552), 'chillow.model.player.Player', 'Player', (['(1)', '(1)', '(2)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 1, 2, Direction.up, 1, True, '')\n", (2516, 2552), False, 'from chillow.model.player import Player\n'), ((2571, 2615), 'chillow.model.player.Player', 'Player', (['(2)', '(1)', '(1)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 1, 1, Direction.down, 3, True, '')\n", (2577, 2615), False, 'from chillow.model.player import Player\n'), ((2982, 3031), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (2990, 3031), False, 'from datetime import datetime, timezone\n'), ((3047, 3088), 'chillow.model.game.Game', 
'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (3051, 3088), False, 'from chillow.model.game import Game\n'), ((3112, 3129), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (3123, 3129), False, 'from chillow.service.game_service import GameService\n'), ((3144, 3184), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(3)'], {}), '(player1, [], 3, 0, 3)\n', (3162, 3184), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((3526, 3568), 'chillow.model.player.Player', 'Player', (['(1)', '(0)', '(4)', 'Direction.up', '(3)', '(True)', '""""""'], {}), "(1, 0, 4, Direction.up, 3, True, '')\n", (3532, 3568), False, 'from chillow.model.player import Player\n'), ((3587, 3631), 'chillow.model.player.Player', 'Player', (['(2)', '(0)', '(1)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 0, 1, Direction.down, 3, True, '')\n", (3593, 3631), False, 'from chillow.model.player import Player\n'), ((3997, 4046), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (4005, 4046), False, 'from datetime import datetime, timezone\n'), ((4062, 4103), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (4066, 4103), False, 'from chillow.model.game import Game\n'), ((4127, 4144), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (4138, 4144), False, 'from chillow.service.game_service import GameService\n'), ((4198, 4238), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(4)', '(0)', '(3)'], {}), '(player1, [], 4, 0, 3)\n', (4216, 4238), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((4643, 4693), 'chillow.model.player.Player', 'Player', (['(1)', '(0)', '(4)', 'Direction.up', 'MAX_SPEED', '(True)', '""""""'], {}), "(1, 0, 4, Direction.up, MAX_SPEED, True, '')\n", (4649, 4693), False, 'from chillow.model.player import Player\n'), ((4712, 4756), 'chillow.model.player.Player', 'Player', (['(2)', '(0)', '(1)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 0, 1, Direction.down, 3, True, '')\n", (4718, 4756), False, 'from chillow.model.player import Player\n'), ((5122, 5171), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (5130, 5171), False, 'from datetime import datetime, timezone\n'), ((5187, 5228), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (5191, 5228), False, 'from chillow.model.game import Game\n'), ((5252, 5269), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (5263, 5269), False, 'from chillow.service.game_service import GameService\n'), ((5284, 5332), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', 'MAX_SPEED', '(0)', '(3)'], {}), '(player1, [], MAX_SPEED, 0, 3)\n', (5302, 5332), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((5640, 5682), 'chillow.model.player.Player', 'Player', (['(1)', '(0)', '(4)', 'Direction.up', 
'(1)', '(True)', '""""""'], {}), "(1, 0, 4, Direction.up, 1, True, '')\n", (5646, 5682), False, 'from chillow.model.player import Player\n'), ((5701, 5745), 'chillow.model.player.Player', 'Player', (['(2)', '(0)', '(1)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 0, 1, Direction.down, 3, True, '')\n", (5707, 5745), False, 'from chillow.model.player import Player\n'), ((6111, 6160), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (6119, 6160), False, 'from datetime import datetime, timezone\n'), ((6176, 6217), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (6180, 6217), False, 'from chillow.model.game import Game\n'), ((6241, 6258), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (6252, 6258), False, 'from chillow.service.game_service import GameService\n'), ((6273, 6313), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(3)'], {}), '(player1, [], 3, 0, 3)\n', (6291, 6313), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((6905, 6947), 'chillow.model.player.Player', 'Player', (['(1)', '(0)', '(4)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 0, 4, Direction.up, 1, True, '')\n", (6911, 6947), False, 'from chillow.model.player import Player\n'), ((6966, 7010), 'chillow.model.player.Player', 'Player', (['(2)', '(4)', '(4)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 4, 4, Direction.down, 3, True, '')\n", (6972, 7010), False, 'from chillow.model.player import Player\n'), ((7376, 7425), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (7384, 7425), False, 'from datetime import datetime, timezone\n'), ((7441, 7482), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (7445, 7482), False, 'from chillow.model.game import Game\n'), ((7506, 7523), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (7517, 7523), False, 'from chillow.service.game_service import GameService\n'), ((7538, 7595), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', 'MAX_WORSE_DISTANCE', '(3)'], {}), '(player1, [], 3, MAX_WORSE_DISTANCE, 3)\n', (7556, 7595), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((8213, 8255), 'chillow.model.player.Player', 'Player', (['(1)', '(0)', '(4)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 0, 4, Direction.up, 1, True, '')\n", (8219, 8255), False, 'from chillow.model.player import Player\n'), ((8270, 8309), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player', '[]', '(3)', '(1)', '(3)'], {}), '(player, [], 3, 1, 3)\n', (8288, 8309), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((8589, 8631), 'chillow.model.player.Player', 'Player', (['(1)', '(1)', '(2)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 1, 2, Direction.up, 1, True, '')\n", (8595, 8631), False, 'from chillow.model.player import Player\n'), ((8650, 8694), 'chillow.model.player.Player', 'Player', (['(2)', '(1)', '(1)', 
'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 1, 1, Direction.down, 3, True, '')\n", (8656, 8694), False, 'from chillow.model.player import Player\n'), ((9103, 9152), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (9111, 9152), False, 'from datetime import datetime, timezone\n'), ((9168, 9209), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (9172, 9209), False, 'from chillow.model.game import Game\n'), ((9233, 9250), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (9244, 9250), False, 'from chillow.service.game_service import GameService\n'), ((9265, 9305), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(2)'], {}), '(player1, [], 3, 0, 2)\n', (9283, 9305), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((9600, 9642), 'chillow.model.player.Player', 'Player', (['(1)', '(1)', '(2)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 1, 2, Direction.up, 1, True, '')\n", (9606, 9642), False, 'from chillow.model.player import Player\n'), ((9661, 9705), 'chillow.model.player.Player', 'Player', (['(2)', '(1)', '(1)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 1, 1, Direction.down, 3, True, '')\n", (9667, 9705), False, 'from chillow.model.player import Player\n'), ((10264, 10313), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (10272, 10313), False, 'from datetime import datetime, timezone\n'), ((10329, 10370), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (10333, 10370), False, 'from chillow.model.game import Game\n'), ((10394, 10411), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (10405, 10411), False, 'from chillow.service.game_service import GameService\n'), ((10426, 10466), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(2)'], {}), '(player1, [], 3, 0, 2)\n', (10444, 10466), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((10695, 10737), 'chillow.model.player.Player', 'Player', (['(1)', '(1)', '(2)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 1, 2, Direction.up, 1, True, '')\n", (10701, 10737), False, 'from chillow.model.player import Player\n'), ((10756, 10800), 'chillow.model.player.Player', 'Player', (['(2)', '(1)', '(1)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 1, 1, Direction.down, 3, True, '')\n", (10762, 10800), False, 'from chillow.model.player import Player\n'), ((11359, 11408), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (11367, 11408), False, 'from datetime import datetime, timezone\n'), ((11424, 11465), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (11428, 11465), False, 'from chillow.model.game import Game\n'), ((11489, 11506), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), 
'(game)\n', (11500, 11506), False, 'from chillow.service.game_service import GameService\n'), ((11521, 11561), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(3)'], {}), '(player1, [], 3, 0, 3)\n', (11539, 11561), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((11845, 11887), 'chillow.model.player.Player', 'Player', (['(1)', '(1)', '(2)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 1, 2, Direction.up, 1, True, '')\n", (11851, 11887), False, 'from chillow.model.player import Player\n'), ((11906, 11950), 'chillow.model.player.Player', 'Player', (['(2)', '(1)', '(1)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 1, 1, Direction.down, 3, True, '')\n", (11912, 11950), False, 'from chillow.model.player import Player\n'), ((12509, 12558), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (12517, 12558), False, 'from datetime import datetime, timezone\n'), ((12574, 12615), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (12578, 12615), False, 'from chillow.model.game import Game\n'), ((12639, 12656), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (12650, 12656), False, 'from chillow.service.game_service import GameService\n'), ((12671, 12711), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(3)'], {}), '(player1, [], 3, 0, 3)\n', (12689, 12711), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((12936, 12978), 'chillow.model.player.Player', 'Player', (['(1)', '(1)', '(2)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 1, 2, Direction.up, 1, True, '')\n", (12942, 12978), False, 'from chillow.model.player import Player\n'), ((12997, 13041), 'chillow.model.player.Player', 'Player', (['(2)', '(1)', '(1)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 1, 1, Direction.down, 3, True, '')\n", (13003, 13041), False, 'from chillow.model.player import Player\n'), ((13600, 13649), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (13608, 13649), False, 'from datetime import datetime, timezone\n'), ((13665, 13706), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (13669, 13706), False, 'from chillow.model.game import Game\n'), ((13730, 13747), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (13741, 13747), False, 'from chillow.service.game_service import GameService\n'), ((13762, 13802), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(5)'], {}), '(player1, [], 3, 0, 5)\n', (13780, 13802), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((14077, 14119), 'chillow.model.player.Player', 'Player', (['(1)', '(1)', '(2)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 1, 2, Direction.up, 1, True, '')\n", (14083, 14119), False, 'from chillow.model.player import Player\n'), ((14138, 14182), 'chillow.model.player.Player', 'Player', (['(2)', '(1)', '(1)', 'Direction.down', 
'(3)', '(True)', '""""""'], {}), "(2, 1, 1, Direction.down, 3, True, '')\n", (14144, 14182), False, 'from chillow.model.player import Player\n'), ((14732, 14781), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (14740, 14781), False, 'from datetime import datetime, timezone\n'), ((14797, 14838), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (14801, 14838), False, 'from chillow.model.game import Game\n'), ((14862, 14879), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (14873, 14879), False, 'from chillow.service.game_service import GameService\n'), ((14894, 14934), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(5)'], {}), '(player1, [], 3, 0, 5)\n', (14912, 14934), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((15225, 15267), 'chillow.model.player.Player', 'Player', (['(1)', '(1)', '(2)', 'Direction.up', '(1)', '(True)', '""""""'], {}), "(1, 1, 2, Direction.up, 1, True, '')\n", (15231, 15267), False, 'from chillow.model.player import Player\n'), ((15286, 15330), 'chillow.model.player.Player', 'Player', (['(2)', '(1)', '(1)', 'Direction.down', '(3)', '(True)', '""""""'], {}), "(2, 1, 1, Direction.down, 3, True, '')\n", (15292, 15330), False, 'from chillow.model.player import Player\n'), ((15890, 15939), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)', '(12)', '(5)', '(13)', '(0)', 'timezone.utc'], {}), '(2020, 10, 1, 12, 5, 13, 0, timezone.utc)\n', (15898, 15939), False, 'from datetime import datetime, timezone\n'), ((15955, 15996), 'chillow.model.game.Game', 'Game', (['(5)', '(5)', 'cells', 'players', '(2)', '(True)', 'time'], {}), '(5, 5, cells, players, 2, True, time)\n', (15959, 15996), False, 'from chillow.model.game import Game\n'), ((16020, 16037), 'chillow.service.game_service.GameService', 'GameService', (['game'], {}), '(game)\n', (16031, 16037), False, 'from chillow.service.game_service import GameService\n'), ((16052, 16092), 'chillow.service.ai.not_killing_itself_ai.NotKillingItselfAI', 'NotKillingItselfAI', (['player1', '[]', '(3)', '(0)', '(5)'], {}), '(player1, [], 3, 0, 5)\n', (16070, 16092), False, 'from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI\n'), ((706, 721), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (710, 721), False, 'from chillow.model.cell import Cell\n'), ((723, 729), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (727, 729), False, 'from chillow.model.cell import Cell\n'), ((731, 737), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (735, 737), False, 'from chillow.model.cell import Cell\n'), ((739, 745), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (743, 745), False, 'from chillow.model.cell import Cell\n'), ((747, 753), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (751, 753), False, 'from chillow.model.cell import Cell\n'), ((774, 780), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (778, 780), False, 'from chillow.model.cell import Cell\n'), ((782, 788), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (786, 788), False, 'from chillow.model.cell import Cell\n'), ((790, 796), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (794, 796), False, 'from chillow.model.cell import Cell\n'), ((798, 804), 
'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (802, 804), False, 'from chillow.model.cell import Cell\n'), ((806, 812), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (810, 812), False, 'from chillow.model.cell import Cell\n'), ((833, 839), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (837, 839), False, 'from chillow.model.cell import Cell\n'), ((841, 847), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (845, 847), False, 'from chillow.model.cell import Cell\n'), ((849, 855), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (853, 855), False, 'from chillow.model.cell import Cell\n'), ((857, 863), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (861, 863), False, 'from chillow.model.cell import Cell\n'), ((865, 871), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (869, 871), False, 'from chillow.model.cell import Cell\n'), ((892, 898), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (896, 898), False, 'from chillow.model.cell import Cell\n'), ((900, 906), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (904, 906), False, 'from chillow.model.cell import Cell\n'), ((908, 914), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (912, 914), False, 'from chillow.model.cell import Cell\n'), ((916, 922), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (920, 922), False, 'from chillow.model.cell import Cell\n'), ((924, 930), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (928, 930), False, 'from chillow.model.cell import Cell\n'), ((951, 957), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (955, 957), False, 'from chillow.model.cell import Cell\n'), ((959, 965), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (963, 965), False, 'from chillow.model.cell import Cell\n'), ((967, 973), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (971, 973), False, 'from chillow.model.cell import Cell\n'), ((975, 981), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (979, 981), False, 'from chillow.model.cell import Cell\n'), ((983, 998), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (987, 998), False, 'from chillow.model.cell import Cell\n'), ((1659, 1665), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1663, 1665), False, 'from chillow.model.cell import Cell\n'), ((1667, 1673), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1671, 1673), False, 'from chillow.model.cell import Cell\n'), ((1675, 1681), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1679, 1681), False, 'from chillow.model.cell import Cell\n'), ((1683, 1689), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1687, 1689), False, 'from chillow.model.cell import Cell\n'), ((1691, 1697), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1695, 1697), False, 'from chillow.model.cell import Cell\n'), ((1718, 1733), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (1722, 1733), False, 'from chillow.model.cell import Cell\n'), ((1735, 1741), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1739, 1741), False, 'from chillow.model.cell import Cell\n'), ((1743, 1749), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1747, 1749), False, 'from chillow.model.cell import Cell\n'), ((1751, 1757), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1755, 1757), False, 'from chillow.model.cell import Cell\n'), ((1759, 1765), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1763, 1765), False, 'from chillow.model.cell import Cell\n'), ((1786, 1792), 'chillow.model.cell.Cell', 'Cell', ([], 
{}), '()\n', (1790, 1792), False, 'from chillow.model.cell import Cell\n'), ((1794, 1800), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1798, 1800), False, 'from chillow.model.cell import Cell\n'), ((1802, 1808), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1806, 1808), False, 'from chillow.model.cell import Cell\n'), ((1810, 1816), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1814, 1816), False, 'from chillow.model.cell import Cell\n'), ((1818, 1824), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1822, 1824), False, 'from chillow.model.cell import Cell\n'), ((1845, 1851), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1849, 1851), False, 'from chillow.model.cell import Cell\n'), ((1853, 1859), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1857, 1859), False, 'from chillow.model.cell import Cell\n'), ((1861, 1867), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1865, 1867), False, 'from chillow.model.cell import Cell\n'), ((1869, 1875), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1873, 1875), False, 'from chillow.model.cell import Cell\n'), ((1877, 1883), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1881, 1883), False, 'from chillow.model.cell import Cell\n'), ((1904, 1910), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1908, 1910), False, 'from chillow.model.cell import Cell\n'), ((1912, 1918), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1916, 1918), False, 'from chillow.model.cell import Cell\n'), ((1920, 1926), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1924, 1926), False, 'from chillow.model.cell import Cell\n'), ((1928, 1934), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (1932, 1934), False, 'from chillow.model.cell import Cell\n'), ((1936, 1951), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (1940, 1951), False, 'from chillow.model.cell import Cell\n'), ((2671, 2677), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2675, 2677), False, 'from chillow.model.cell import Cell\n'), ((2679, 2685), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2683, 2685), False, 'from chillow.model.cell import Cell\n'), ((2687, 2693), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2691, 2693), False, 'from chillow.model.cell import Cell\n'), ((2695, 2701), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2699, 2701), False, 'from chillow.model.cell import Cell\n'), ((2703, 2709), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2707, 2709), False, 'from chillow.model.cell import Cell\n'), ((2730, 2736), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2734, 2736), False, 'from chillow.model.cell import Cell\n'), ((2738, 2753), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (2742, 2753), False, 'from chillow.model.cell import Cell\n'), ((2755, 2761), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2759, 2761), False, 'from chillow.model.cell import Cell\n'), ((2763, 2769), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2767, 2769), False, 'from chillow.model.cell import Cell\n'), ((2771, 2777), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2775, 2777), False, 'from chillow.model.cell import Cell\n'), ((2798, 2804), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2802, 2804), False, 'from chillow.model.cell import Cell\n'), ((2806, 2821), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (2810, 2821), False, 'from chillow.model.cell import Cell\n'), ((2823, 
2829), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2827, 2829), False, 'from chillow.model.cell import Cell\n'), ((2831, 2837), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2835, 2837), False, 'from chillow.model.cell import Cell\n'), ((2839, 2845), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2843, 2845), False, 'from chillow.model.cell import Cell\n'), ((2866, 2872), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2870, 2872), False, 'from chillow.model.cell import Cell\n'), ((2874, 2880), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2878, 2880), False, 'from chillow.model.cell import Cell\n'), ((2882, 2888), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2886, 2888), False, 'from chillow.model.cell import Cell\n'), ((2890, 2896), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2894, 2896), False, 'from chillow.model.cell import Cell\n'), ((2898, 2904), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2902, 2904), False, 'from chillow.model.cell import Cell\n'), ((2925, 2931), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2929, 2931), False, 'from chillow.model.cell import Cell\n'), ((2933, 2939), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2937, 2939), False, 'from chillow.model.cell import Cell\n'), ((2941, 2947), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2945, 2947), False, 'from chillow.model.cell import Cell\n'), ((2949, 2955), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2953, 2955), False, 'from chillow.model.cell import Cell\n'), ((2957, 2963), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (2961, 2963), False, 'from chillow.model.cell import Cell\n'), ((3687, 3693), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3691, 3693), False, 'from chillow.model.cell import Cell\n'), ((3695, 3701), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3699, 3701), False, 'from chillow.model.cell import Cell\n'), ((3703, 3709), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3707, 3709), False, 'from chillow.model.cell import Cell\n'), ((3711, 3717), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3715, 3717), False, 'from chillow.model.cell import Cell\n'), ((3719, 3725), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3723, 3725), False, 'from chillow.model.cell import Cell\n'), ((3746, 3761), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (3750, 3761), False, 'from chillow.model.cell import Cell\n'), ((3763, 3769), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3767, 3769), False, 'from chillow.model.cell import Cell\n'), ((3771, 3777), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3775, 3777), False, 'from chillow.model.cell import Cell\n'), ((3779, 3785), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3783, 3785), False, 'from chillow.model.cell import Cell\n'), ((3787, 3793), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3791, 3793), False, 'from chillow.model.cell import Cell\n'), ((3814, 3820), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3818, 3820), False, 'from chillow.model.cell import Cell\n'), ((3822, 3828), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3826, 3828), False, 'from chillow.model.cell import Cell\n'), ((3830, 3836), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3834, 3836), False, 'from chillow.model.cell import Cell\n'), ((3838, 3844), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3842, 3844), False, 'from chillow.model.cell import Cell\n'), 
((3846, 3852), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3850, 3852), False, 'from chillow.model.cell import Cell\n'), ((3873, 3879), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3877, 3879), False, 'from chillow.model.cell import Cell\n'), ((3881, 3887), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3885, 3887), False, 'from chillow.model.cell import Cell\n'), ((3889, 3895), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3893, 3895), False, 'from chillow.model.cell import Cell\n'), ((3897, 3903), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3901, 3903), False, 'from chillow.model.cell import Cell\n'), ((3905, 3911), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3909, 3911), False, 'from chillow.model.cell import Cell\n'), ((3932, 3947), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (3936, 3947), False, 'from chillow.model.cell import Cell\n'), ((3949, 3955), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3953, 3955), False, 'from chillow.model.cell import Cell\n'), ((3957, 3963), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3961, 3963), False, 'from chillow.model.cell import Cell\n'), ((3965, 3971), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3969, 3971), False, 'from chillow.model.cell import Cell\n'), ((3973, 3979), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (3977, 3979), False, 'from chillow.model.cell import Cell\n'), ((4812, 4818), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4816, 4818), False, 'from chillow.model.cell import Cell\n'), ((4820, 4826), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4824, 4826), False, 'from chillow.model.cell import Cell\n'), ((4828, 4834), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4832, 4834), False, 'from chillow.model.cell import Cell\n'), ((4836, 4842), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4840, 4842), False, 'from chillow.model.cell import Cell\n'), ((4844, 4850), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4848, 4850), False, 'from chillow.model.cell import Cell\n'), ((4871, 4886), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (4875, 4886), False, 'from chillow.model.cell import Cell\n'), ((4888, 4894), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4892, 4894), False, 'from chillow.model.cell import Cell\n'), ((4896, 4902), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4900, 4902), False, 'from chillow.model.cell import Cell\n'), ((4904, 4910), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4908, 4910), False, 'from chillow.model.cell import Cell\n'), ((4912, 4918), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4916, 4918), False, 'from chillow.model.cell import Cell\n'), ((4939, 4945), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4943, 4945), False, 'from chillow.model.cell import Cell\n'), ((4947, 4953), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4951, 4953), False, 'from chillow.model.cell import Cell\n'), ((4955, 4961), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4959, 4961), False, 'from chillow.model.cell import Cell\n'), ((4963, 4969), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4967, 4969), False, 'from chillow.model.cell import Cell\n'), ((4971, 4977), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (4975, 4977), False, 'from chillow.model.cell import Cell\n'), ((4998, 5004), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5002, 5004), False, 'from 
chillow.model.cell import Cell\n'), ((5006, 5012), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5010, 5012), False, 'from chillow.model.cell import Cell\n'), ((5014, 5020), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5018, 5020), False, 'from chillow.model.cell import Cell\n'), ((5022, 5028), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5026, 5028), False, 'from chillow.model.cell import Cell\n'), ((5030, 5036), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5034, 5036), False, 'from chillow.model.cell import Cell\n'), ((5057, 5072), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (5061, 5072), False, 'from chillow.model.cell import Cell\n'), ((5074, 5080), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5078, 5080), False, 'from chillow.model.cell import Cell\n'), ((5082, 5088), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5086, 5088), False, 'from chillow.model.cell import Cell\n'), ((5090, 5096), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5094, 5096), False, 'from chillow.model.cell import Cell\n'), ((5098, 5104), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5102, 5104), False, 'from chillow.model.cell import Cell\n'), ((5801, 5807), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5805, 5807), False, 'from chillow.model.cell import Cell\n'), ((5809, 5815), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5813, 5815), False, 'from chillow.model.cell import Cell\n'), ((5817, 5823), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5821, 5823), False, 'from chillow.model.cell import Cell\n'), ((5825, 5831), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5829, 5831), False, 'from chillow.model.cell import Cell\n'), ((5833, 5839), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5837, 5839), False, 'from chillow.model.cell import Cell\n'), ((5860, 5875), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (5864, 5875), False, 'from chillow.model.cell import Cell\n'), ((5877, 5883), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5881, 5883), False, 'from chillow.model.cell import Cell\n'), ((5885, 5891), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5889, 5891), False, 'from chillow.model.cell import Cell\n'), ((5893, 5899), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5897, 5899), False, 'from chillow.model.cell import Cell\n'), ((5901, 5907), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5905, 5907), False, 'from chillow.model.cell import Cell\n'), ((5928, 5934), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5932, 5934), False, 'from chillow.model.cell import Cell\n'), ((5936, 5942), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5940, 5942), False, 'from chillow.model.cell import Cell\n'), ((5944, 5950), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5948, 5950), False, 'from chillow.model.cell import Cell\n'), ((5952, 5958), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5956, 5958), False, 'from chillow.model.cell import Cell\n'), ((5960, 5966), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5964, 5966), False, 'from chillow.model.cell import Cell\n'), ((5987, 5993), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5991, 5993), False, 'from chillow.model.cell import Cell\n'), ((5995, 6001), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (5999, 6001), False, 'from chillow.model.cell import Cell\n'), ((6003, 6009), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', 
(6007, 6009), False, 'from chillow.model.cell import Cell\n'), ((6011, 6017), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (6015, 6017), False, 'from chillow.model.cell import Cell\n'), ((6019, 6025), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (6023, 6025), False, 'from chillow.model.cell import Cell\n'), ((6046, 6061), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (6050, 6061), False, 'from chillow.model.cell import Cell\n'), ((6063, 6069), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (6067, 6069), False, 'from chillow.model.cell import Cell\n'), ((6071, 6077), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (6075, 6077), False, 'from chillow.model.cell import Cell\n'), ((6079, 6085), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (6083, 6085), False, 'from chillow.model.cell import Cell\n'), ((6087, 6093), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (6091, 6093), False, 'from chillow.model.cell import Cell\n'), ((7066, 7072), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7070, 7072), False, 'from chillow.model.cell import Cell\n'), ((7074, 7080), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7078, 7080), False, 'from chillow.model.cell import Cell\n'), ((7082, 7088), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7086, 7088), False, 'from chillow.model.cell import Cell\n'), ((7090, 7096), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7094, 7096), False, 'from chillow.model.cell import Cell\n'), ((7098, 7104), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7102, 7104), False, 'from chillow.model.cell import Cell\n'), ((7125, 7131), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7129, 7131), False, 'from chillow.model.cell import Cell\n'), ((7133, 7139), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7137, 7139), False, 'from chillow.model.cell import Cell\n'), ((7141, 7147), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7145, 7147), False, 'from chillow.model.cell import Cell\n'), ((7149, 7155), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7153, 7155), False, 'from chillow.model.cell import Cell\n'), ((7157, 7163), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7161, 7163), False, 'from chillow.model.cell import Cell\n'), ((7184, 7190), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7188, 7190), False, 'from chillow.model.cell import Cell\n'), ((7192, 7198), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7196, 7198), False, 'from chillow.model.cell import Cell\n'), ((7200, 7206), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7204, 7206), False, 'from chillow.model.cell import Cell\n'), ((7208, 7214), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7212, 7214), False, 'from chillow.model.cell import Cell\n'), ((7216, 7222), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7220, 7222), False, 'from chillow.model.cell import Cell\n'), ((7243, 7249), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7247, 7249), False, 'from chillow.model.cell import Cell\n'), ((7251, 7257), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7255, 7257), False, 'from chillow.model.cell import Cell\n'), ((7259, 7265), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7263, 7265), False, 'from chillow.model.cell import Cell\n'), ((7267, 7273), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7271, 7273), False, 'from chillow.model.cell import Cell\n'), ((7275, 7281), 'chillow.model.cell.Cell', 'Cell', ([], {}), 
'()\n', (7279, 7281), False, 'from chillow.model.cell import Cell\n'), ((7302, 7317), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (7306, 7317), False, 'from chillow.model.cell import Cell\n'), ((7319, 7325), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7323, 7325), False, 'from chillow.model.cell import Cell\n'), ((7327, 7333), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7331, 7333), False, 'from chillow.model.cell import Cell\n'), ((7335, 7341), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (7339, 7341), False, 'from chillow.model.cell import Cell\n'), ((7343, 7358), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (7347, 7358), False, 'from chillow.model.cell import Cell\n'), ((8750, 8756), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8754, 8756), False, 'from chillow.model.cell import Cell\n'), ((8768, 8774), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8772, 8774), False, 'from chillow.model.cell import Cell\n'), ((8776, 8782), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8780, 8782), False, 'from chillow.model.cell import Cell\n'), ((8784, 8790), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8788, 8790), False, 'from chillow.model.cell import Cell\n'), ((8792, 8798), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8796, 8798), False, 'from chillow.model.cell import Cell\n'), ((8819, 8834), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (8823, 8834), False, 'from chillow.model.cell import Cell\n'), ((8837, 8852), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (8841, 8852), False, 'from chillow.model.cell import Cell\n'), ((8854, 8860), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8858, 8860), False, 'from chillow.model.cell import Cell\n'), ((8862, 8868), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8866, 8868), False, 'from chillow.model.cell import Cell\n'), ((8870, 8876), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8874, 8876), False, 'from chillow.model.cell import Cell\n'), ((8897, 8903), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8901, 8903), False, 'from chillow.model.cell import Cell\n'), ((8915, 8930), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (8919, 8930), False, 'from chillow.model.cell import Cell\n'), ((8932, 8938), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8936, 8938), False, 'from chillow.model.cell import Cell\n'), ((8940, 8946), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8944, 8946), False, 'from chillow.model.cell import Cell\n'), ((8948, 8954), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8952, 8954), False, 'from chillow.model.cell import Cell\n'), ((8975, 8990), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (8979, 8990), False, 'from chillow.model.cell import Cell\n'), ((8993, 8999), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (8997, 8999), False, 'from chillow.model.cell import Cell\n'), ((9001, 9007), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9005, 9007), False, 'from chillow.model.cell import Cell\n'), ((9009, 9015), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9013, 9015), False, 'from chillow.model.cell import Cell\n'), ((9017, 9023), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9021, 9023), False, 'from chillow.model.cell import Cell\n'), ((9044, 9050), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9048, 9050), False, 
'from chillow.model.cell import Cell\n'), ((9052, 9058), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9056, 9058), False, 'from chillow.model.cell import Cell\n'), ((9062, 9068), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9066, 9068), False, 'from chillow.model.cell import Cell\n'), ((9070, 9076), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9074, 9076), False, 'from chillow.model.cell import Cell\n'), ((9078, 9084), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9082, 9084), False, 'from chillow.model.cell import Cell\n'), ((9761, 9767), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9765, 9767), False, 'from chillow.model.cell import Cell\n'), ((9779, 9785), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9783, 9785), False, 'from chillow.model.cell import Cell\n'), ((9796, 9802), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9800, 9802), False, 'from chillow.model.cell import Cell\n'), ((9815, 9821), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9819, 9821), False, 'from chillow.model.cell import Cell\n'), ((9835, 9841), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9839, 9841), False, 'from chillow.model.cell import Cell\n'), ((9862, 9877), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (9866, 9877), False, 'from chillow.model.cell import Cell\n'), ((9880, 9895), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (9884, 9895), False, 'from chillow.model.cell import Cell\n'), ((9897, 9912), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (9901, 9912), False, 'from chillow.model.cell import Cell\n'), ((9916, 9922), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9920, 9922), False, 'from chillow.model.cell import Cell\n'), ((9936, 9942), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9940, 9942), False, 'from chillow.model.cell import Cell\n'), ((9963, 9969), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (9967, 9969), False, 'from chillow.model.cell import Cell\n'), ((9981, 9996), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (9985, 9996), False, 'from chillow.model.cell import Cell\n'), ((9998, 10004), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10002, 10004), False, 'from chillow.model.cell import Cell\n'), ((10017, 10032), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (10021, 10032), False, 'from chillow.model.cell import Cell\n'), ((10037, 10043), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10041, 10043), False, 'from chillow.model.cell import Cell\n'), ((10064, 10079), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (10068, 10079), False, 'from chillow.model.cell import Cell\n'), ((10082, 10088), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10086, 10088), False, 'from chillow.model.cell import Cell\n'), ((10099, 10114), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (10103, 10114), False, 'from chillow.model.cell import Cell\n'), ((10118, 10124), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10122, 10124), False, 'from chillow.model.cell import Cell\n'), ((10138, 10144), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10142, 10144), False, 'from chillow.model.cell import Cell\n'), ((10165, 10171), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10169, 10171), False, 'from chillow.model.cell import Cell\n'), ((10183, 10189), 'chillow.model.cell.Cell', 'Cell', ([], {}), 
'()\n', (10187, 10189), False, 'from chillow.model.cell import Cell\n'), ((10200, 10206), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10204, 10206), False, 'from chillow.model.cell import Cell\n'), ((10219, 10225), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10223, 10225), False, 'from chillow.model.cell import Cell\n'), ((10239, 10245), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10243, 10245), False, 'from chillow.model.cell import Cell\n'), ((10856, 10862), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10860, 10862), False, 'from chillow.model.cell import Cell\n'), ((10874, 10880), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10878, 10880), False, 'from chillow.model.cell import Cell\n'), ((10891, 10897), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10895, 10897), False, 'from chillow.model.cell import Cell\n'), ((10910, 10916), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10914, 10916), False, 'from chillow.model.cell import Cell\n'), ((10930, 10936), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (10934, 10936), False, 'from chillow.model.cell import Cell\n'), ((10957, 10972), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (10961, 10972), False, 'from chillow.model.cell import Cell\n'), ((10975, 10990), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (10979, 10990), False, 'from chillow.model.cell import Cell\n'), ((10992, 11007), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (10996, 11007), False, 'from chillow.model.cell import Cell\n'), ((11011, 11017), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11015, 11017), False, 'from chillow.model.cell import Cell\n'), ((11031, 11037), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11035, 11037), False, 'from chillow.model.cell import Cell\n'), ((11058, 11064), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11062, 11064), False, 'from chillow.model.cell import Cell\n'), ((11076, 11091), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (11080, 11091), False, 'from chillow.model.cell import Cell\n'), ((11093, 11099), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11097, 11099), False, 'from chillow.model.cell import Cell\n'), ((11112, 11127), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (11116, 11127), False, 'from chillow.model.cell import Cell\n'), ((11132, 11138), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11136, 11138), False, 'from chillow.model.cell import Cell\n'), ((11159, 11174), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (11163, 11174), False, 'from chillow.model.cell import Cell\n'), ((11177, 11183), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11181, 11183), False, 'from chillow.model.cell import Cell\n'), ((11194, 11200), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11198, 11200), False, 'from chillow.model.cell import Cell\n'), ((11213, 11219), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11217, 11219), False, 'from chillow.model.cell import Cell\n'), ((11233, 11239), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11237, 11239), False, 'from chillow.model.cell import Cell\n'), ((11260, 11266), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11264, 11266), False, 'from chillow.model.cell import Cell\n'), ((11278, 11284), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11282, 11284), False, 'from chillow.model.cell 
import Cell\n'), ((11295, 11301), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11299, 11301), False, 'from chillow.model.cell import Cell\n'), ((11314, 11320), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11318, 11320), False, 'from chillow.model.cell import Cell\n'), ((11334, 11340), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (11338, 11340), False, 'from chillow.model.cell import Cell\n'), ((12006, 12012), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12010, 12012), False, 'from chillow.model.cell import Cell\n'), ((12024, 12030), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12028, 12030), False, 'from chillow.model.cell import Cell\n'), ((12041, 12047), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12045, 12047), False, 'from chillow.model.cell import Cell\n'), ((12060, 12066), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12064, 12066), False, 'from chillow.model.cell import Cell\n'), ((12080, 12086), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12084, 12086), False, 'from chillow.model.cell import Cell\n'), ((12107, 12122), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (12111, 12122), False, 'from chillow.model.cell import Cell\n'), ((12125, 12140), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (12129, 12140), False, 'from chillow.model.cell import Cell\n'), ((12142, 12157), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (12146, 12157), False, 'from chillow.model.cell import Cell\n'), ((12161, 12167), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12165, 12167), False, 'from chillow.model.cell import Cell\n'), ((12181, 12187), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12185, 12187), False, 'from chillow.model.cell import Cell\n'), ((12208, 12214), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12212, 12214), False, 'from chillow.model.cell import Cell\n'), ((12226, 12241), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (12230, 12241), False, 'from chillow.model.cell import Cell\n'), ((12243, 12249), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12247, 12249), False, 'from chillow.model.cell import Cell\n'), ((12262, 12277), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (12266, 12277), False, 'from chillow.model.cell import Cell\n'), ((12282, 12288), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12286, 12288), False, 'from chillow.model.cell import Cell\n'), ((12309, 12324), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (12313, 12324), False, 'from chillow.model.cell import Cell\n'), ((12327, 12342), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (12331, 12342), False, 'from chillow.model.cell import Cell\n'), ((12344, 12350), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12348, 12350), False, 'from chillow.model.cell import Cell\n'), ((12363, 12378), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (12367, 12378), False, 'from chillow.model.cell import Cell\n'), ((12383, 12389), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12387, 12389), False, 'from chillow.model.cell import Cell\n'), ((12410, 12416), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12414, 12416), False, 'from chillow.model.cell import Cell\n'), ((12428, 12434), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12432, 12434), False, 'from chillow.model.cell import Cell\n'), 
((12445, 12460), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (12449, 12460), False, 'from chillow.model.cell import Cell\n'), ((12464, 12470), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12468, 12470), False, 'from chillow.model.cell import Cell\n'), ((12484, 12490), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (12488, 12490), False, 'from chillow.model.cell import Cell\n'), ((13097, 13103), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13101, 13103), False, 'from chillow.model.cell import Cell\n'), ((13115, 13121), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13119, 13121), False, 'from chillow.model.cell import Cell\n'), ((13132, 13138), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13136, 13138), False, 'from chillow.model.cell import Cell\n'), ((13151, 13157), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13155, 13157), False, 'from chillow.model.cell import Cell\n'), ((13171, 13177), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13175, 13177), False, 'from chillow.model.cell import Cell\n'), ((13198, 13213), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (13202, 13213), False, 'from chillow.model.cell import Cell\n'), ((13216, 13231), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (13220, 13231), False, 'from chillow.model.cell import Cell\n'), ((13233, 13248), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (13237, 13248), False, 'from chillow.model.cell import Cell\n'), ((13252, 13258), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13256, 13258), False, 'from chillow.model.cell import Cell\n'), ((13272, 13278), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13276, 13278), False, 'from chillow.model.cell import Cell\n'), ((13299, 13305), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13303, 13305), False, 'from chillow.model.cell import Cell\n'), ((13317, 13332), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (13321, 13332), False, 'from chillow.model.cell import Cell\n'), ((13334, 13340), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13338, 13340), False, 'from chillow.model.cell import Cell\n'), ((13353, 13368), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (13357, 13368), False, 'from chillow.model.cell import Cell\n'), ((13373, 13379), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13377, 13379), False, 'from chillow.model.cell import Cell\n'), ((13400, 13415), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (13404, 13415), False, 'from chillow.model.cell import Cell\n'), ((13418, 13433), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (13422, 13433), False, 'from chillow.model.cell import Cell\n'), ((13435, 13441), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13439, 13441), False, 'from chillow.model.cell import Cell\n'), ((13454, 13469), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (13458, 13469), False, 'from chillow.model.cell import Cell\n'), ((13474, 13480), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13478, 13480), False, 'from chillow.model.cell import Cell\n'), ((13501, 13507), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13505, 13507), False, 'from chillow.model.cell import Cell\n'), ((13519, 13525), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13523, 13525), False, 'from chillow.model.cell import Cell\n'), 
((13536, 13551), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (13540, 13551), False, 'from chillow.model.cell import Cell\n'), ((13555, 13561), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13559, 13561), False, 'from chillow.model.cell import Cell\n'), ((13575, 13581), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (13579, 13581), False, 'from chillow.model.cell import Cell\n'), ((14238, 14244), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14242, 14244), False, 'from chillow.model.cell import Cell\n'), ((14256, 14262), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14260, 14262), False, 'from chillow.model.cell import Cell\n'), ((14273, 14279), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14277, 14279), False, 'from chillow.model.cell import Cell\n'), ((14292, 14298), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14296, 14298), False, 'from chillow.model.cell import Cell\n'), ((14312, 14318), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14316, 14318), False, 'from chillow.model.cell import Cell\n'), ((14339, 14354), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (14343, 14354), False, 'from chillow.model.cell import Cell\n'), ((14357, 14372), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (14361, 14372), False, 'from chillow.model.cell import Cell\n'), ((14374, 14389), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (14378, 14389), False, 'from chillow.model.cell import Cell\n'), ((14393, 14399), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14397, 14399), False, 'from chillow.model.cell import Cell\n'), ((14413, 14419), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14417, 14419), False, 'from chillow.model.cell import Cell\n'), ((14440, 14446), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14444, 14446), False, 'from chillow.model.cell import Cell\n'), ((14458, 14473), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (14462, 14473), False, 'from chillow.model.cell import Cell\n'), ((14475, 14481), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14479, 14481), False, 'from chillow.model.cell import Cell\n'), ((14494, 14509), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (14498, 14509), False, 'from chillow.model.cell import Cell\n'), ((14514, 14520), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14518, 14520), False, 'from chillow.model.cell import Cell\n'), ((14541, 14556), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (14545, 14556), False, 'from chillow.model.cell import Cell\n'), ((14559, 14565), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14563, 14565), False, 'from chillow.model.cell import Cell\n'), ((14567, 14573), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14571, 14573), False, 'from chillow.model.cell import Cell\n'), ((14586, 14601), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (14590, 14601), False, 'from chillow.model.cell import Cell\n'), ((14606, 14612), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14610, 14612), False, 'from chillow.model.cell import Cell\n'), ((14633, 14639), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14637, 14639), False, 'from chillow.model.cell import Cell\n'), ((14651, 14657), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14655, 14657), False, 'from chillow.model.cell import Cell\n'), ((14668, 14683), 
'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (14672, 14683), False, 'from chillow.model.cell import Cell\n'), ((14687, 14693), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14691, 14693), False, 'from chillow.model.cell import Cell\n'), ((14707, 14713), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (14711, 14713), False, 'from chillow.model.cell import Cell\n'), ((15386, 15392), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15390, 15392), False, 'from chillow.model.cell import Cell\n'), ((15404, 15410), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15408, 15410), False, 'from chillow.model.cell import Cell\n'), ((15421, 15427), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15425, 15427), False, 'from chillow.model.cell import Cell\n'), ((15440, 15446), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15444, 15446), False, 'from chillow.model.cell import Cell\n'), ((15460, 15466), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15464, 15466), False, 'from chillow.model.cell import Cell\n'), ((15487, 15502), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (15491, 15502), False, 'from chillow.model.cell import Cell\n'), ((15505, 15520), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (15509, 15520), False, 'from chillow.model.cell import Cell\n'), ((15522, 15537), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (15526, 15537), False, 'from chillow.model.cell import Cell\n'), ((15541, 15547), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15545, 15547), False, 'from chillow.model.cell import Cell\n'), ((15561, 15567), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15565, 15567), False, 'from chillow.model.cell import Cell\n'), ((15588, 15594), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15592, 15594), False, 'from chillow.model.cell import Cell\n'), ((15606, 15621), 'chillow.model.cell.Cell', 'Cell', (['[player1]'], {}), '([player1])\n', (15610, 15621), False, 'from chillow.model.cell import Cell\n'), ((15623, 15629), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15627, 15629), False, 'from chillow.model.cell import Cell\n'), ((15642, 15657), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (15646, 15657), False, 'from chillow.model.cell import Cell\n'), ((15662, 15668), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15666, 15668), False, 'from chillow.model.cell import Cell\n'), ((15689, 15704), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (15693, 15704), False, 'from chillow.model.cell import Cell\n'), ((15707, 15713), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15711, 15713), False, 'from chillow.model.cell import Cell\n'), ((15724, 15739), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (15728, 15739), False, 'from chillow.model.cell import Cell\n'), ((15744, 15759), 'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (15748, 15759), False, 'from chillow.model.cell import Cell\n'), ((15764, 15770), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15768, 15770), False, 'from chillow.model.cell import Cell\n'), ((15791, 15797), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15795, 15797), False, 'from chillow.model.cell import Cell\n'), ((15809, 15815), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15813, 15815), False, 'from chillow.model.cell import Cell\n'), ((15826, 15841), 
'chillow.model.cell.Cell', 'Cell', (['[player2]'], {}), '([player2])\n', (15830, 15841), False, 'from chillow.model.cell import Cell\n'), ((15845, 15851), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15849, 15851), False, 'from chillow.model.cell import Cell\n'), ((15865, 15871), 'chillow.model.cell.Cell', 'Cell', ([], {}), '()\n', (15869, 15871), False, 'from chillow.model.cell import Cell\n')]
|
import json
import pytest
@pytest.mark.usefixtures('client', 'headers')
class TestInfection:
def test_infection_region_tc01(self, client, headers):
        # the db has data between 2020-03-22 and 2020-03-24
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
'end_date': '2020-03-24',
'include_hmt': 'false'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc02(self, client, headers):
        # the db has no data between 2020-03-25 and 2020-03-26
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-25',
'end_date': '2020-03-26',
'include_hmt': 'false'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc03(self, client, headers):
        # the db has data between 2020-03-22 and 2020-03-24
        # look up detail (include_hmt='true')
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc04(self, client, headers):
        # the db has data between 2020-03-22 and 2020-03-24
        # look up detail; end_date is omitted from the payload
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
# 'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc05(self, client, headers):
        # the db has data between 2020-03-22 and 2020-03-24
        # start_date 2020-01-22 falls outside that range, so a 400 response is expected
region = 'China'
payload = {
'region': region,
'start_date': '2020-01-22',
# 'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 400
print("response: ", response.text)
response_data = json.loads(response.text)['code']
assert response_data == "30018"
def test_infection_region_detail(self, client, headers):
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region/detail', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
@pytest.mark.skip
def test_infection_area(self, client, headers):
region = 'China'
area = 'Chongqing'
payload = {
'region': region,
'area': area,
'start_date': '2020-03-22',
'end_date': '2020-03-24'
}
response = client.get('/infection/area', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_global(self, client, headers):
response = client.get('/infection/global', headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
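    # Minimal sketch of the conftest.py fixtures these tests rely on. The
    # 'client' and 'headers' fixtures are defined elsewhere in the project;
    # the TestClient import, the 'main.app' location and the header contents
    # below are illustrative assumptions, not the project's actual code.
    #
    #   import pytest
    #   from fastapi.testclient import TestClient
    #   from main import app
    #
    #   @pytest.fixture(scope='session')
    #   def client():
    #       return TestClient(app)
    #
    #   @pytest.fixture(scope='session')
    #   def headers():
    #       return {'Authorization': 'Bearer <token>'}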
|
[
"json.loads",
"pytest.mark.usefixtures"
] |
[((30, 74), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""client"""', '"""headers"""'], {}), "('client', 'headers')\n", (53, 74), False, 'import pytest\n'), ((600, 625), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (610, 625), False, 'import json\n'), ((1171, 1196), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1181, 1196), False, 'import json\n'), ((1762, 1787), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1772, 1787), False, 'import json\n'), ((2355, 2380), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2365, 2380), False, 'import json\n'), ((2948, 2973), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2958, 2973), False, 'import json\n'), ((3482, 3507), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (3492, 3507), False, 'import json\n'), ((4028, 4053), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (4038, 4053), False, 'import json\n'), ((4324, 4349), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (4334, 4349), False, 'import json\n')]
|
"""Module grouping tests for the pydov.util.owsutil module."""
import copy
import re
import pytest
from numpy.compat import unicode
from owslib.etree import etree
from owslib.fes import (
PropertyIsEqualTo,
FilterRequest,
)
from owslib.iso import MD_Metadata
from owslib.util import nspath_eval
from pydov.util import owsutil
from pydov.util.errors import (
MetadataNotFoundError,
FeatureCatalogueNotFoundError,
)
from pydov.util.location import (
Within,
Box,
)
from tests.test_search_boring import (
md_metadata,
mp_remote_md,
mp_remote_describefeaturetype,
mp_remote_fc,
location_md_metadata,
location_fc_featurecatalogue,
location_wfs_describefeaturetype,
)
from tests.test_search import (
wfs,
mp_wfs,
mp_remote_fc_notfound
)
def clean_xml(xml):
"""Clean the given XML string of namespace definition, namespace
prefixes and syntactical but otherwise meaningless differences.
Parameters
----------
xml : str
String representation of XML document.
Returns
-------
str
String representation of cleaned XML document.
"""
# remove xmlns namespace definitions
r = re.sub(r'[ ]+xmlns:[^=]+="[^"]+"', '', xml)
# remove namespace prefixes in tags
r = re.sub(r'<(/?)[^:]+:([^ >]+)([ >])', r'<\1\2\3', r)
# remove extra spaces in tags
r = re.sub(r'[ ]+/>', '/>', r)
# remove extra spaces between tags
r = re.sub(r'>[ ]+<', '><', r)
return r
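# A minimal illustration of what clean_xml does (hypothetical input, kept in a
# comment so nothing runs at import time):
#
#   clean_xml('<ns0:Query xmlns:ns0="http://www.opengis.net/wfs" '
#             'typeName="dov-pub:Boringen" />')
#   returns '<Query typeName="dov-pub:Boringen"/>'
#
# i.e. namespace definitions, namespace prefixes and extra whitespace are
# stripped so that generated and expected XML strings can be compared literally.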
class TestOwsutil(object):
"""Class grouping tests for the pydov.util.owsutil module."""
def test_get_csw_base_url(self, wfs):
"""Test the owsutil.get_csw_base_url method.
Test whether the CSW base URL of the dov-pub:Boringen layer is correct.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
"""
contentmetadata = wfs.contents['dov-pub:Boringen']
assert owsutil.get_csw_base_url(contentmetadata) == \
'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw'
def test_get_csw_base_url_nometadataurls(self, wfs):
"""Test the owsutil.get_csw_base_url method for a layer without
        metadata urls.
Test whether a MetadataNotFoundError is raised.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
"""
contents = copy.deepcopy(wfs.contents)
contentmetadata = contents['dov-pub:Boringen']
contentmetadata.metadataUrls = []
with pytest.raises(MetadataNotFoundError):
owsutil.get_csw_base_url(contentmetadata)
def test_get_featurecatalogue_uuid(self, md_metadata):
"""Test the owsutil.get_featurecatalogue_uuid method.
Test whether the featurecatalogue uuid of the dov-pub:Boringen layer
is correct.
Parameters
----------
md_metadata : pytest.fixture providing owslib.iso.MD_Metadata
Parsed metadata describing the Boringen WFS layer in more detail,
in the ISO 19115/19139 format.
"""
assert owsutil.get_featurecatalogue_uuid(md_metadata) == \
'c0cbd397-520f-4ee1-aca7-d70e271eeed6'
def test_get_featurecatalogue_uuid_nocontentinfo(self, md_metadata):
"""Test the owsutil.get_featurecatalogue_uuid method when the
metadata is missing the gmd:contentInfo element.
Test whether a FeatureCatalogueNotFoundError is raised.
Parameters
----------
md_metadata : pytest.fixture providing owslib.iso.MD_Metadata
Parsed metadata describing the Boringen WFS layer in more detail,
in the ISO 19115/19139 format.
"""
tree = etree.fromstring(md_metadata.xml)
root = tree.find('{http://www.isotc211.org/2005/gmd}MD_Metadata')
for ci in tree.findall(
'.//{http://www.isotc211.org/2005/gmd}contentInfo'):
root.remove(ci)
md_metadata.xml = etree.tostring(tree)
with pytest.raises(FeatureCatalogueNotFoundError):
owsutil.get_featurecatalogue_uuid(md_metadata)
def test_get_featurecatalogue_uuid_nouuidref(self, md_metadata):
"""Test the owsutil.get_featurecatalogue_uuid method when the
gmd:contentInfo element is missing a 'uuidref' attribute.
Test whether a FeatureCatalogueNotFoundError is raised.
Parameters
----------
md_metadata : pytest.fixture providing owslib.iso.MD_Metadata
Parsed metadata describing the Boringen WFS layer in more detail,
in the ISO 19115/19139 format.
"""
tree = etree.fromstring(md_metadata.xml)
for ci in tree.findall(nspath_eval(
'gmd:MD_Metadata/gmd:contentInfo/'
'gmd:MD_FeatureCatalogueDescription/'
'gmd:featureCatalogueCitation',
{'gmd': 'http://www.isotc211.org/2005/gmd'})):
ci.attrib.pop('uuidref')
md_metadata.xml = etree.tostring(tree)
with pytest.raises(FeatureCatalogueNotFoundError):
owsutil.get_featurecatalogue_uuid(md_metadata)
def test_get_namespace(self, wfs, mp_remote_describefeaturetype):
"""Test the owsutil.get_namespace method.
Test whether the namespace of the dov-pub:Boringen layer is correct.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
mp_remote_describefeaturetype : pytest.fixture
Monkeypatch the call to a remote DescribeFeatureType of the
dov-pub:Boringen layer.
"""
assert owsutil.get_namespace(wfs, 'dov-pub:Boringen') == \
'http://dov.vlaanderen.be/ocdov/dov-pub'
def test_get_remote_featurecatalogue(self, mp_remote_fc):
"""Test the owsutil.get_remote_featurecatalogue method.
Test whether the feature catalogue of the dov-pub:Boringen layer
matches the format described in the docs.
Parameters
----------
mp_remote_fc : pytest.fixture
Monkeypatch the call to get the remote feature catalogue of the
dov-pub:Boringen layer.
"""
fc = owsutil.get_remote_featurecatalogue(
'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw',
'c0cbd397-520f-4ee1-aca7-d70e271eeed6')
assert type(fc) is dict
assert 'definition' in fc
assert type(fc['definition']) in (str, unicode)
assert 'attributes' in fc
assert type(fc['attributes']) is dict
attrs = fc['attributes']
if len(attrs) > 0:
for attr in attrs.values():
assert type(attr) is dict
assert 'definition' in attr
assert type(attr['definition']) in (str, unicode)
assert 'values' in attr
assert type(attr['values']) is list
if len(attr['values']) > 0:
for v in attr['values']:
assert type(v) in (str, unicode)
assert len(attr['values']) == len(set(attr['values']))
assert 'multiplicity' in attr
mp = attr['multiplicity']
assert type(mp) is tuple
assert len(mp) == 2
assert mp[0] in (0, 1)
assert (type(mp[1]) is int and mp[1] > 0) or mp[1] == 'Inf'
def test_get_remote_featurecataloge_baduuid(self, mp_remote_fc_notfound):
"""Test the owsutil.get_remote_featurecatalogue method with an
inexistent feature catalogue uuid.
Test whether a FeatureCatalogueNotFoundError is raised.
Parameters
----------
mp_remote_fc_notfound : pytest.fixture
Monkeypatch the call to get an inexistent remote featurecatalogue.
"""
with pytest.raises(FeatureCatalogueNotFoundError):
owsutil.get_remote_featurecatalogue(
'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw',
'badfc000-0000-0000-0000-badfc00badfc')
def test_get_remote_metadata(self, md_metadata):
"""Test the owsutil.get_remote_metadata method.
Test whether the resulting MD_Metadata is correct.
Parameters
----------
md_metadata : pytest.fixture returning owslib.iso.MD_Metadata
Parsed metadata describing the Boringen WFS layer in more detail,
in the ISO 19115/19139 format.
"""
assert type(md_metadata) is MD_Metadata
def test_get_remote_metadata_nometadataurls(self, wfs):
"""Test the owsutil.get_remote_metadata method when the WFS layer
        is missing metadata URLs.
Test whether a MetadataNotFoundError is raised.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
"""
contents = copy.deepcopy(wfs.contents)
contentmetadata = contents['dov-pub:Boringen']
contentmetadata.metadataUrls = []
with pytest.raises(MetadataNotFoundError):
owsutil.get_remote_metadata(contentmetadata)
def test_wfs_build_getfeature_request_onlytypename(self):
"""Test the owsutil.wfs_build_getfeature_request method with only a
typename specified.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
xml = owsutil.wfs_build_getfeature_request('dov-pub:Boringen')
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"><wfs:Query '
'typeName="dov-pub:Boringen"><ogc:Filter '
'xmlns:ogc="http://www.opengis.net/ogc"/></wfs:Query></wfs'
':GetFeature>')
def test_wfs_build_getfeature_request_bbox_nogeometrycolumn(self):
"""Test the owsutil.wfs_build_getfeature_request method with a location
argument but without the geometry_column argument.
Test whether an AttributeError is raised.
"""
with pytest.raises(AttributeError):
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen',
location=Within(Box(151650, 214675, 151750, 214775)))
def test_wfs_build_getfeature_request_bbox(self):
"""Test the owsutil.wfs_build_getfeature_request method with a
typename, box and geometry_column.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen',
location=Within(Box(151650, 214675, 151750, 214775)),
geometry_column='geom')
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"><wfs:Query '
'typeName="dov-pub:Boringen"><ogc:Filter '
'xmlns:ogc="http://www.opengis.net/ogc"><ogc:Within> '
'<ogc:PropertyName>geom</ogc:PropertyName><gml:Envelope '
'xmlns:gml="http://www.opengis.net/gml" srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"><gml'
':lowerCorner>151650.000 '
'214675.000</gml:lowerCorner><gml:upperCorner>151750.000 '
'214775.000</gml:upperCorner></gml:Envelope></ogc:Within></ogc'
':Filter></wfs:Query></wfs:GetFeature>')
def test_wfs_build_getfeature_request_propertyname(self):
"""Test the owsutil.wfs_build_getfeature_request method with a list
of propertynames.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen', propertyname=['fiche', 'diepte_tot_m'])
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
'typeName="dov-pub:Boringen"> '
'<wfs:PropertyName>fiche</wfs:PropertyName> '
'<wfs:PropertyName>diepte_tot_m</wfs:PropertyName> <ogc:Filter/> '
'</wfs:Query> </wfs:GetFeature>')
def test_wfs_build_getfeature_request_filter(self):
"""Test the owsutil.wfs_build_getfeature_request method with an
attribute filter.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Herstappe')
filter_request = FilterRequest()
filter_request = filter_request.setConstraint(query)
try:
filter_request = etree.tostring(filter_request,
encoding='unicode')
except LookupError:
# Python2.7 without lxml uses 'utf-8' instead.
filter_request = etree.tostring(filter_request,
encoding='utf-8')
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen', filter=filter_request)
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
'typeName="dov-pub:Boringen"> <ogc:Filter> '
'<ogc:PropertyIsEqualTo> '
'<ogc:PropertyName>gemeente</ogc:PropertyName> '
'<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> '
'</ogc:Filter> </wfs:Query> </wfs:GetFeature>')
def test_wfs_build_getfeature_request_bbox_filter(self):
"""Test the owsutil.wfs_build_getfeature_request method with an
attribute filter, a box and a geometry_column.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Herstappe')
filter_request = FilterRequest()
filter_request = filter_request.setConstraint(query)
try:
filter_request = etree.tostring(filter_request,
encoding='unicode')
except LookupError:
# Python2.7 without lxml uses 'utf-8' instead.
filter_request = etree.tostring(filter_request,
encoding='utf-8')
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen', filter=filter_request,
location=Within(Box(151650, 214675, 151750, 214775)),
geometry_column='geom')
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
'typeName="dov-pub:Boringen"> <ogc:Filter> <ogc:And> '
'<ogc:PropertyIsEqualTo> '
'<ogc:PropertyName>gemeente</ogc:PropertyName> '
'<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> '
'<ogc:Within> <ogc:PropertyName>geom</ogc:PropertyName> '
'<gml:Envelope xmlns:gml="http://www.opengis.net/gml" '
'srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"> '
'<gml:lowerCorner>151650.000 214675.000</gml:lowerCorner> '
'<gml:upperCorner>151750.000 214775.000</gml:upperCorner> '
'</gml:Envelope> </ogc:Within> </ogc:And> </ogc:Filter> '
'</wfs:Query> </wfs:GetFeature>')
def test_wfs_build_getfeature_request_bbox_filter_propertyname(self):
"""Test the owsutil.wfs_build_getfeature_request method with an
attribute filter, a box, a geometry_column and a list of
propertynames.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Herstappe')
filter_request = FilterRequest()
filter_request = filter_request.setConstraint(query)
try:
filter_request = etree.tostring(filter_request,
encoding='unicode')
except LookupError:
# Python2.7 without lxml uses 'utf-8' instead.
filter_request = etree.tostring(filter_request,
encoding='utf-8')
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen', filter=filter_request,
location=Within(Box(151650, 214675, 151750, 214775)),
geometry_column='geom', propertyname=['fiche', 'diepte_tot_m'])
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
'typeName="dov-pub:Boringen"> '
'<wfs:PropertyName>fiche</wfs:PropertyName> '
'<wfs:PropertyName>diepte_tot_m</wfs:PropertyName> <ogc:Filter> '
'<ogc:And> <ogc:PropertyIsEqualTo> '
'<ogc:PropertyName>gemeente</ogc:PropertyName> '
'<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> '
'<ogc:Within> <ogc:PropertyName>geom</ogc:PropertyName> '
'<gml:Envelope xmlns:gml="http://www.opengis.net/gml" '
'srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"> '
'<gml:lowerCorner>151650.000 214675.000</gml:lowerCorner> '
'<gml:upperCorner>151750.000 214775.000</gml:upperCorner> '
'</gml:Envelope> </ogc:Within> </ogc:And> </ogc:Filter> '
'</wfs:Query> </wfs:GetFeature>')
|
[
"owslib.util.nspath_eval",
"owslib.fes.PropertyIsEqualTo",
"pydov.util.owsutil.get_namespace",
"pydov.util.owsutil.wfs_build_getfeature_request",
"pydov.util.owsutil.get_remote_metadata",
"pydov.util.location.Box",
"owslib.etree.etree.fromstring",
"owslib.etree.etree.tostring",
"pydov.util.owsutil.get_csw_base_url",
"pytest.raises",
"owslib.fes.FilterRequest",
"copy.deepcopy",
"re.sub",
"pydov.util.owsutil.get_remote_featurecatalogue",
"pydov.util.owsutil.get_featurecatalogue_uuid"
] |
[((1196, 1238), 're.sub', 're.sub', (['"""[ ]+xmlns:[^=]+="[^"]+\\""""', '""""""', 'xml'], {}), '(\'[ ]+xmlns:[^=]+="[^"]+"\', \'\', xml)\n', (1202, 1238), False, 'import re\n'), ((1289, 1341), 're.sub', 're.sub', (['"""<(/?)[^:]+:([^ >]+)([ >])"""', '"""<\\\\1\\\\2\\\\3"""', 'r'], {}), "('<(/?)[^:]+:([^ >]+)([ >])', '<\\\\1\\\\2\\\\3', r)\n", (1295, 1341), False, 'import re\n'), ((1384, 1409), 're.sub', 're.sub', (['"""[ ]+/>"""', '"""/>"""', 'r'], {}), "('[ ]+/>', '/>', r)\n", (1390, 1409), False, 'import re\n'), ((1459, 1484), 're.sub', 're.sub', (['""">[ ]+<"""', '"""><"""', 'r'], {}), "('>[ ]+<', '><', r)\n", (1465, 1484), False, 'import re\n'), ((2562, 2589), 'copy.deepcopy', 'copy.deepcopy', (['wfs.contents'], {}), '(wfs.contents)\n', (2575, 2589), False, 'import copy\n'), ((3900, 3933), 'owslib.etree.etree.fromstring', 'etree.fromstring', (['md_metadata.xml'], {}), '(md_metadata.xml)\n', (3916, 3933), False, 'from owslib.etree import etree\n'), ((4163, 4183), 'owslib.etree.etree.tostring', 'etree.tostring', (['tree'], {}), '(tree)\n', (4177, 4183), False, 'from owslib.etree import etree\n'), ((4832, 4865), 'owslib.etree.etree.fromstring', 'etree.fromstring', (['md_metadata.xml'], {}), '(md_metadata.xml)\n', (4848, 4865), False, 'from owslib.etree import etree\n'), ((5173, 5193), 'owslib.etree.etree.tostring', 'etree.tostring', (['tree'], {}), '(tree)\n', (5187, 5193), False, 'from owslib.etree import etree\n'), ((6450, 6589), 'pydov.util.owsutil.get_remote_featurecatalogue', 'owsutil.get_remote_featurecatalogue', (['"""https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw"""', '"""c0cbd397-520f-4ee1-aca7-d70e271eeed6"""'], {}), "(\n 'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw',\n 'c0cbd397-520f-4ee1-aca7-d70e271eeed6')\n", (6485, 6589), False, 'from pydov.util import owsutil\n'), ((9219, 9246), 'copy.deepcopy', 'copy.deepcopy', (['wfs.contents'], {}), '(wfs.contents)\n', (9232, 9246), False, 'import copy\n'), ((9727, 9783), 'pydov.util.owsutil.wfs_build_getfeature_request', 'owsutil.wfs_build_getfeature_request', (['"""dov-pub:Boringen"""'], {}), "('dov-pub:Boringen')\n", (9763, 9783), False, 'from pydov.util import owsutil\n'), ((12505, 12606), 'pydov.util.owsutil.wfs_build_getfeature_request', 'owsutil.wfs_build_getfeature_request', (['"""dov-pub:Boringen"""'], {'propertyname': "['fiche', 'diepte_tot_m']"}), "('dov-pub:Boringen', propertyname=[\n 'fiche', 'diepte_tot_m'])\n", (12541, 12606), False, 'from pydov.util import owsutil\n'), ((13501, 13564), 'owslib.fes.PropertyIsEqualTo', 'PropertyIsEqualTo', ([], {'propertyname': '"""gemeente"""', 'literal': '"""Herstappe"""'}), "(propertyname='gemeente', literal='Herstappe')\n", (13518, 13564), False, 'from owslib.fes import PropertyIsEqualTo, FilterRequest\n'), ((13624, 13639), 'owslib.fes.FilterRequest', 'FilterRequest', ([], {}), '()\n', (13637, 13639), False, 'from owslib.fes import PropertyIsEqualTo, FilterRequest\n'), ((14062, 14141), 'pydov.util.owsutil.wfs_build_getfeature_request', 'owsutil.wfs_build_getfeature_request', (['"""dov-pub:Boringen"""'], {'filter': 'filter_request'}), "('dov-pub:Boringen', filter=filter_request)\n", (14098, 14141), False, 'from pydov.util import owsutil\n'), ((15142, 15205), 'owslib.fes.PropertyIsEqualTo', 'PropertyIsEqualTo', ([], {'propertyname': '"""gemeente"""', 'literal': '"""Herstappe"""'}), "(propertyname='gemeente', literal='Herstappe')\n", (15159, 15205), False, 'from owslib.fes import PropertyIsEqualTo, FilterRequest\n'), ((15265, 15280), 'owslib.fes.FilterRequest', 
'FilterRequest', ([], {}), '()\n', (15278, 15280), False, 'from owslib.fes import PropertyIsEqualTo, FilterRequest\n'), ((17383, 17446), 'owslib.fes.PropertyIsEqualTo', 'PropertyIsEqualTo', ([], {'propertyname': '"""gemeente"""', 'literal': '"""Herstappe"""'}), "(propertyname='gemeente', literal='Herstappe')\n", (17400, 17446), False, 'from owslib.fes import PropertyIsEqualTo, FilterRequest\n'), ((17506, 17521), 'owslib.fes.FilterRequest', 'FilterRequest', ([], {}), '()\n', (17519, 17521), False, 'from owslib.fes import PropertyIsEqualTo, FilterRequest\n'), ((2032, 2073), 'pydov.util.owsutil.get_csw_base_url', 'owsutil.get_csw_base_url', (['contentmetadata'], {}), '(contentmetadata)\n', (2056, 2073), False, 'from pydov.util import owsutil\n'), ((2700, 2736), 'pytest.raises', 'pytest.raises', (['MetadataNotFoundError'], {}), '(MetadataNotFoundError)\n', (2713, 2736), False, 'import pytest\n'), ((2750, 2791), 'pydov.util.owsutil.get_csw_base_url', 'owsutil.get_csw_base_url', (['contentmetadata'], {}), '(contentmetadata)\n', (2774, 2791), False, 'from pydov.util import owsutil\n'), ((3270, 3316), 'pydov.util.owsutil.get_featurecatalogue_uuid', 'owsutil.get_featurecatalogue_uuid', (['md_metadata'], {}), '(md_metadata)\n', (3303, 3316), False, 'from pydov.util import owsutil\n'), ((4198, 4242), 'pytest.raises', 'pytest.raises', (['FeatureCatalogueNotFoundError'], {}), '(FeatureCatalogueNotFoundError)\n', (4211, 4242), False, 'import pytest\n'), ((4256, 4302), 'pydov.util.owsutil.get_featurecatalogue_uuid', 'owsutil.get_featurecatalogue_uuid', (['md_metadata'], {}), '(md_metadata)\n', (4289, 4302), False, 'from pydov.util import owsutil\n'), ((4897, 5062), 'owslib.util.nspath_eval', 'nspath_eval', (['"""gmd:MD_Metadata/gmd:contentInfo/gmd:MD_FeatureCatalogueDescription/gmd:featureCatalogueCitation"""', "{'gmd': 'http://www.isotc211.org/2005/gmd'}"], {}), "(\n 'gmd:MD_Metadata/gmd:contentInfo/gmd:MD_FeatureCatalogueDescription/gmd:featureCatalogueCitation'\n , {'gmd': 'http://www.isotc211.org/2005/gmd'})\n", (4908, 5062), False, 'from owslib.util import nspath_eval\n'), ((5208, 5252), 'pytest.raises', 'pytest.raises', (['FeatureCatalogueNotFoundError'], {}), '(FeatureCatalogueNotFoundError)\n', (5221, 5252), False, 'import pytest\n'), ((5266, 5312), 'pydov.util.owsutil.get_featurecatalogue_uuid', 'owsutil.get_featurecatalogue_uuid', (['md_metadata'], {}), '(md_metadata)\n', (5299, 5312), False, 'from pydov.util import owsutil\n'), ((5876, 5922), 'pydov.util.owsutil.get_namespace', 'owsutil.get_namespace', (['wfs', '"""dov-pub:Boringen"""'], {}), "(wfs, 'dov-pub:Boringen')\n", (5897, 5922), False, 'from pydov.util import owsutil\n'), ((8109, 8153), 'pytest.raises', 'pytest.raises', (['FeatureCatalogueNotFoundError'], {}), '(FeatureCatalogueNotFoundError)\n', (8122, 8153), False, 'import pytest\n'), ((8167, 8306), 'pydov.util.owsutil.get_remote_featurecatalogue', 'owsutil.get_remote_featurecatalogue', (['"""https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw"""', '"""badfc000-0000-0000-0000-badfc00badfc"""'], {}), "(\n 'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw',\n 'badfc000-0000-0000-0000-badfc00badfc')\n", (8202, 8306), False, 'from pydov.util import owsutil\n'), ((9357, 9393), 'pytest.raises', 'pytest.raises', (['MetadataNotFoundError'], {}), '(MetadataNotFoundError)\n', (9370, 9393), False, 'import pytest\n'), ((9407, 9451), 'pydov.util.owsutil.get_remote_metadata', 'owsutil.get_remote_metadata', (['contentmetadata'], {}), '(contentmetadata)\n', (9434, 9451), False, 'from pydov.util 
import owsutil\n'), ((10620, 10649), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (10633, 10649), False, 'import pytest\n'), ((13743, 13793), 'owslib.etree.etree.tostring', 'etree.tostring', (['filter_request'], {'encoding': '"""unicode"""'}), "(filter_request, encoding='unicode')\n", (13757, 13793), False, 'from owslib.etree import etree\n'), ((15384, 15434), 'owslib.etree.etree.tostring', 'etree.tostring', (['filter_request'], {'encoding': '"""unicode"""'}), "(filter_request, encoding='unicode')\n", (15398, 15434), False, 'from owslib.etree import etree\n'), ((17625, 17675), 'owslib.etree.etree.tostring', 'etree.tostring', (['filter_request'], {'encoding': '"""unicode"""'}), "(filter_request, encoding='unicode')\n", (17639, 17675), False, 'from owslib.etree import etree\n'), ((13954, 14002), 'owslib.etree.etree.tostring', 'etree.tostring', (['filter_request'], {'encoding': '"""utf-8"""'}), "(filter_request, encoding='utf-8')\n", (13968, 14002), False, 'from owslib.etree import etree\n'), ((15595, 15643), 'owslib.etree.etree.tostring', 'etree.tostring', (['filter_request'], {'encoding': '"""utf-8"""'}), "(filter_request, encoding='utf-8')\n", (15609, 15643), False, 'from owslib.etree import etree\n'), ((17836, 17884), 'owslib.etree.etree.tostring', 'etree.tostring', (['filter_request'], {'encoding': '"""utf-8"""'}), "(filter_request, encoding='utf-8')\n", (17850, 17884), False, 'from owslib.etree import etree\n'), ((11188, 11223), 'pydov.util.location.Box', 'Box', (['(151650)', '(214675)', '(151750)', '(214775)'], {}), '(151650, 214675, 151750, 214775)\n', (11191, 11223), False, 'from pydov.util.location import Within, Box\n'), ((15824, 15859), 'pydov.util.location.Box', 'Box', (['(151650)', '(214675)', '(151750)', '(214775)'], {}), '(151650, 214675, 151750, 214775)\n', (15827, 15859), False, 'from pydov.util.location import Within, Box\n'), ((18065, 18100), 'pydov.util.location.Box', 'Box', (['(151650)', '(214675)', '(151750)', '(214775)'], {}), '(151650, 214675, 151750, 214775)\n', (18068, 18100), False, 'from pydov.util.location import Within, Box\n'), ((9809, 9828), 'owslib.etree.etree.tostring', 'etree.tostring', (['xml'], {}), '(xml)\n', (9823, 9828), False, 'from owslib.etree import etree\n'), ((10775, 10810), 'pydov.util.location.Box', 'Box', (['(151650)', '(214675)', '(151750)', '(214775)'], {}), '(151650, 214675, 151750, 214775)\n', (10778, 10810), False, 'from pydov.util.location import Within, Box\n'), ((11287, 11306), 'owslib.etree.etree.tostring', 'etree.tostring', (['xml'], {}), '(xml)\n', (11301, 11306), False, 'from owslib.etree import etree\n'), ((12640, 12659), 'owslib.etree.etree.tostring', 'etree.tostring', (['xml'], {}), '(xml)\n', (12654, 12659), False, 'from owslib.etree import etree\n'), ((14180, 14199), 'owslib.etree.etree.tostring', 'etree.tostring', (['xml'], {}), '(xml)\n', (14194, 14199), False, 'from owslib.etree import etree\n'), ((15923, 15942), 'owslib.etree.etree.tostring', 'etree.tostring', (['xml'], {}), '(xml)\n', (15937, 15942), False, 'from owslib.etree import etree\n'), ((18204, 18223), 'owslib.etree.etree.tostring', 'etree.tostring', (['xml'], {}), '(xml)\n', (18218, 18223), False, 'from owslib.etree import etree\n')]
|
# -*- coding: utf-8 -*-
"""
manage
~~~~~~
Flask-Script Manager
"""
import os
from flask.ext.script import Manager
from flask.ext.migrate import MigrateCommand
from fbone import create_app
from fbone.extensions import db
from fbone.utils import PROJECT_PATH, MALE
from fbone.modules.user import User, ADMIN, ACTIVE
from fbone.modules.movies import Movie
from fbone.modules.user.commands import CreateUserCommand, DeleteUserCommand, ListUsersCommand
app = create_app()
manager = Manager(create_app)
manager.add_option('-c', '--config', dest='config', required=False)
manager.add_command('create_user', CreateUserCommand())
manager.add_command('delete_user', DeleteUserCommand())
manager.add_command('list_users', ListUsersCommand())
manager.add_command('db', MigrateCommand)
@manager.command
def initdb():
"""Init/reset database."""
db.drop_all()
db.create_all()
admin = User(
name=u'admin',
fullname=u'<NAME>',
email=u'<EMAIL>',
password=u'<PASSWORD>',
role_code=ADMIN,
status_code=ACTIVE,
gender_code=MALE,
bio=u'FSU Grad. Go Noles!')
db.session.add(admin)
db.session.commit()
@manager.command
def tests():
"""Run the tests."""
import pytest
exit_code = pytest.main([os.path.join(PROJECT_PATH, 'tests'), '--verbose'])
return exit_code
if __name__ == "__main__":
manager.run()
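# Example invocations (illustrative; 'runserver' and 'shell' are Flask-Script
# defaults, the 'db' subcommands come from Flask-Migrate, and the config path
# is hypothetical):
#   python manage.py initdb
#   python manage.py create_user
#   python manage.py list_users
#   python manage.py db upgrade
#   python manage.py -c ../config/local.cfg runserver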
|
[
"fbone.modules.user.commands.ListUsersCommand",
"flask.ext.script.Manager",
"fbone.modules.user.commands.CreateUserCommand",
"fbone.extensions.db.create_all",
"fbone.modules.user.User",
"fbone.extensions.db.session.commit",
"os.path.join",
"fbone.extensions.db.session.add",
"fbone.create_app",
"fbone.extensions.db.drop_all",
"fbone.modules.user.commands.DeleteUserCommand"
] |
[((472, 484), 'fbone.create_app', 'create_app', ([], {}), '()\n', (482, 484), False, 'from fbone import create_app\n'), ((495, 514), 'flask.ext.script.Manager', 'Manager', (['create_app'], {}), '(create_app)\n', (502, 514), False, 'from flask.ext.script import Manager\n'), ((618, 637), 'fbone.modules.user.commands.CreateUserCommand', 'CreateUserCommand', ([], {}), '()\n', (635, 637), False, 'from fbone.modules.user.commands import CreateUserCommand, DeleteUserCommand, ListUsersCommand\n'), ((674, 693), 'fbone.modules.user.commands.DeleteUserCommand', 'DeleteUserCommand', ([], {}), '()\n', (691, 693), False, 'from fbone.modules.user.commands import CreateUserCommand, DeleteUserCommand, ListUsersCommand\n'), ((729, 747), 'fbone.modules.user.commands.ListUsersCommand', 'ListUsersCommand', ([], {}), '()\n', (745, 747), False, 'from fbone.modules.user.commands import CreateUserCommand, DeleteUserCommand, ListUsersCommand\n'), ((860, 873), 'fbone.extensions.db.drop_all', 'db.drop_all', ([], {}), '()\n', (871, 873), False, 'from fbone.extensions import db\n'), ((878, 893), 'fbone.extensions.db.create_all', 'db.create_all', ([], {}), '()\n', (891, 893), False, 'from fbone.extensions import db\n'), ((911, 1084), 'fbone.modules.user.User', 'User', ([], {'name': 'u"""admin"""', 'fullname': 'u"""<NAME>"""', 'email': 'u"""<EMAIL>"""', 'password': 'u"""<PASSWORD>"""', 'role_code': 'ADMIN', 'status_code': 'ACTIVE', 'gender_code': 'MALE', 'bio': 'u"""FSU Grad. Go Noles!"""'}), "(name=u'admin', fullname=u'<NAME>', email=u'<EMAIL>', password=\n u'<PASSWORD>', role_code=ADMIN, status_code=ACTIVE, gender_code=MALE,\n bio=u'FSU Grad. Go Noles!')\n", (915, 1084), False, 'from fbone.modules.user import User, ADMIN, ACTIVE\n'), ((1145, 1166), 'fbone.extensions.db.session.add', 'db.session.add', (['admin'], {}), '(admin)\n', (1159, 1166), False, 'from fbone.extensions import db\n'), ((1171, 1190), 'fbone.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1188, 1190), False, 'from fbone.extensions import db\n'), ((1295, 1330), 'os.path.join', 'os.path.join', (['PROJECT_PATH', '"""tests"""'], {}), "(PROJECT_PATH, 'tests')\n", (1307, 1330), False, 'import os\n')]
|
"""
MDCrane demo
=============
.. seealso::
`Material Design spec,
Crane <https://material.io/design/material-studies/crane.html#>`
Crane is a travel app that helps users find and book travel, lodging, and
restaurant options that match their input preferences.
"""
import os
import sys
from pathlib import Path
from kivy.lang import Builder
from kivymd.app import MDApp
if getattr(sys, "frozen", False): # bundle mode with PyInstaller
os.environ["CRANE_ROOT"] = sys._MEIPASS
else:
os.environ["CRANE_ROOT"] = str(Path(__file__).parent)
KV_DIR = f"{os.path.dirname(__file__)}/libs/kv/"
for kv_file in os.listdir(KV_DIR):
with open(os.path.join(KV_DIR, kv_file), encoding="utf-8") as kv:
Builder.load_string(kv.read())
KV = """
#:import FadeTransition kivy.uix.screenmanager.FadeTransition
#:import CraneRootScreen libs.baseclass.root_screen.CraneRootScreen
ScreenManager:
transition: FadeTransition()
CraneRootScreen:
name: "crane root screen"
"""
class MDCrane(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.title = "Crane"
self.icon = f"{os.environ['CRANE_ROOT']}/assets/images/logo.png"
self.theme_cls.primary_palette = "Gray"
self.theme_cls.primary_hue = "100"
def build(self):
FONT_PATH = f"{os.environ['CRANE_ROOT']}/assets/fonts/"
self.theme_cls.font_styles.update(
{
"H1": [FONT_PATH + "Raleway-Light", 96, False, -1.5],
"H2": [FONT_PATH + "Raleway-Regular", 60, False, -0.5],
"H3": [FONT_PATH + "Raleway-SemiBold", 48, False, 0],
"H4": [FONT_PATH + "Raleway-SemiBold", 34, False, 0.25],
"H5": [FONT_PATH + "Raleway-SemiBold", 24, False, 0],
"H6": [FONT_PATH + "Raleway-SemiBold", 20, False, 0.15],
"Subtitle1": [
FONT_PATH + "Raleway-Medium",
16,
False,
0.15,
],
"Subtitle2": [
FONT_PATH + "Raleway-SemiBold",
14,
False,
0.1,
],
"Body1": [FONT_PATH + "Raleway-SemiBold", 16, False, 0.5],
"Body2": [FONT_PATH + "Raleway-Regular", 14, False, 0.25],
"Button": [FONT_PATH + "Raleway-SemiBold", 14, True, 1.25],
"Caption": [
FONT_PATH + "Raleway-Medium",
12,
False,
0.4,
],
"Overline": [
FONT_PATH + "Raleway-SemiBold",
12,
True,
1.5,
],
}
)
return Builder.load_string(KV)
MDCrane().run()
|
[
"os.listdir",
"pathlib.Path",
"kivy.lang.Builder.load_string",
"os.path.join",
"os.path.dirname"
] |
[((634, 652), 'os.listdir', 'os.listdir', (['KV_DIR'], {}), '(KV_DIR)\n', (644, 652), False, 'import os\n'), ((581, 606), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (596, 606), False, 'import os\n'), ((2840, 2863), 'kivy.lang.Builder.load_string', 'Builder.load_string', (['KV'], {}), '(KV)\n', (2859, 2863), False, 'from kivy.lang import Builder\n'), ((544, 558), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (548, 558), False, 'from pathlib import Path\n'), ((668, 697), 'os.path.join', 'os.path.join', (['KV_DIR', 'kv_file'], {}), '(KV_DIR, kv_file)\n', (680, 697), False, 'import os\n')]
|
#!/usr/bin/env python
# coding=UTF-8
"""
@Author: <NAME>
@LastEditors: <NAME>
@Description:
@Date: 2021-09-24
@LastEditTime: 2022-04-17
Adapted from OpenAttack's DCESSubstitute
"""
import random
from typing import NoReturn, List, Any, Optional
import numpy as np
from utils.transformations.base import CharSubstitute
from utils.assets import fetch
from utils.misc import DEFAULTS
__all__ = [
"CharacterDCESSubstitute",
]
class CharacterDCESSubstitute(CharSubstitute):
""" """
__name__ = "CharacterDCESSubstitute"
def __init__(
self, threshold: float, random_one: bool = False, **kwargs: Any
) -> NoReturn:
""" """
super().__init__(**kwargs)
self.threshold = threshold
dces_dict = fetch("dces")
self.descs = dces_dict["descs"]
self.neigh = dces_dict["neigh"]
self.random_one = random_one
def _get_candidates(
self,
word: str,
pos_tag: Optional[str] = None,
num: Optional[int] = None,
) -> List[str]:
""" """
candidate_words = []
if self.random_one:
i = DEFAULTS.RNG.integers(0, len(word))
repl_letters = self._apply_dces(word[i], self.threshold)
if len(repl_letters) > 0:
repl_letter = random.choice(repl_letters)
candidate_word = word[:i] + repl_letter + word[i + 1 :]
candidate_words.append(candidate_word)
else:
for i in range(len(word)):
for repl_letter in self._apply_dces(word[i], self.threshold):
candidate_word = word[:i] + repl_letter + word[i + 1 :]
candidate_words.append(candidate_word)
if num:
candidate_words = candidate_words[:num]
return candidate_words
def _apply_dces(self, char: str, threshold: float) -> List[str]:
""" """
c = get_hex_string(char)
if c in self.descs:
description = self.descs[c]["description"]
else:
return []
tokens = description.split(" ")
case = "unknown"
identifiers = []
for token in tokens:
if len(token) == 1:
identifiers.append(token)
elif token == "SMALL":
case = "SMALL"
elif token == "CAPITAL":
case = "CAPITAL"
matches = []
match_ids = []
for i in identifiers:
for idx, val in self.descs.items():
desc_toks = val["description"].split(" ")
if (
i in desc_toks
and not np.any(np.in1d(desc_toks, _disallowed))
and not np.any(np.in1d(idx, _disallowed_codes))
and not int(idx, 16) > 30000
):
desc_toks = np.array(desc_toks)
case_descriptor = desc_toks[
(desc_toks == "SMALL") | (desc_toks == "CAPITAL")
]
if len(case_descriptor) > 1:
case_descriptor = case_descriptor[0]
elif len(case_descriptor) == 0:
case = "unknown"
if case == "unknown" or case == case_descriptor:
match_ids.append(idx)
matches.append(val["vec"])
if len(matches) == 0:
return []
match_vecs = np.stack(matches)
Y = match_vecs
self.neigh.fit(Y)
X = self.descs[c]["vec"].reshape(1, -1)
if Y.shape[0] > threshold:
dists, idxs = self.neigh.kneighbors(X, threshold, return_distance=True)
else:
dists, idxs = self.neigh.kneighbors(X, Y.shape[0], return_distance=True)
probs = dists.flatten()
charcodes = [match_ids[idx] for idx in idxs.flatten()]
chars = []
for idx, charcode in enumerate(charcodes):
if probs[idx] < threshold:
chars.append(chr(int(charcode, 16)))
return chars
@property
def deterministic(self) -> bool:
return not self.random_one
def extra_repr_keys(self) -> List[str]:
return super().extra_repr_keys() + [
"threshold",
"random_one",
]
_disallowed = [
"TAG",
"MALAYALAM",
"BAMUM",
"HIRAGANA",
"RUNIC",
"TAI",
"SUNDANESE",
"BATAK",
"LEPCHA",
"CHAM",
"TELUGU",
"DEVANGARAI",
"BUGINESE",
"MYANMAR",
"LINEAR",
"SYLOTI",
"PHAGS-PA",
"CHEROKEE",
"CANADIAN",
"YI",
"LYCIAN",
"HANGUL",
"KATAKANA",
"JAVANESE",
"ARABIC",
"KANNADA",
"BUHID",
"TAGBANWA",
"DESERET",
"REJANG",
"BOPOMOFO",
"PERMIC",
"OSAGE",
"TAGALOG",
"MEETEI",
"CARIAN",
"UGARITIC",
"ORIYA",
"ELBASAN",
"CYPRIOT",
"HANUNOO",
"GUJARATI",
"LYDIAN",
"MONGOLIAN",
"AVESTAN",
"MEROITIC",
"KHAROSHTHI",
"HUNGARIAN",
"KHUDAWADI",
"ETHIOPIC",
"PERSIAN",
"OSMANYA",
"ELBASAN",
"TIBETAN",
"BENGALI",
"TURKIC",
"THROWING",
"HANIFI",
"BRAHMI",
"KAITHI",
"LIMBU",
"LAO",
"CHAKMA",
"DEVANAGARI",
"ITALIC",
"CJK",
"MEDEFAIDRIN",
"DIAMOND",
"SAURASHTRA",
"ADLAM",
"DUPLOYAN",
]
_disallowed_codes = [
"1F1A4",
"A7AF",
]
def get_hex_string(ch: str) -> str:
return "{:04x}".format(ord(ch)).upper()
|
[
"random.choice",
"numpy.in1d",
"numpy.stack",
"numpy.array",
"utils.assets.fetch"
] |
[((739, 752), 'utils.assets.fetch', 'fetch', (['"""dces"""'], {}), "('dces')\n", (744, 752), False, 'from utils.assets import fetch\n'), ((3462, 3479), 'numpy.stack', 'np.stack', (['matches'], {}), '(matches)\n', (3470, 3479), True, 'import numpy as np\n'), ((1286, 1313), 'random.choice', 'random.choice', (['repl_letters'], {}), '(repl_letters)\n', (1299, 1313), False, 'import random\n'), ((2851, 2870), 'numpy.array', 'np.array', (['desc_toks'], {}), '(desc_toks)\n', (2859, 2870), True, 'import numpy as np\n'), ((2649, 2680), 'numpy.in1d', 'np.in1d', (['desc_toks', '_disallowed'], {}), '(desc_toks, _disallowed)\n', (2656, 2680), True, 'import numpy as np\n'), ((2717, 2748), 'numpy.in1d', 'np.in1d', (['idx', '_disallowed_codes'], {}), '(idx, _disallowed_codes)\n', (2724, 2748), True, 'import numpy as np\n')]
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Portable library for registering and publishing executions."""
import copy
import os
from typing import List, Mapping, MutableMapping, Optional, Sequence, cast
from absl import logging
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from ml_metadata.proto import metadata_store_pb2
def _check_validity(new_artifact: metadata_store_pb2.Artifact,
original_artifact: types.Artifact,
has_multiple_artifacts: bool) -> None:
"""Check the validity of new artifact against the original artifact."""
if new_artifact.type_id != original_artifact.type_id:
raise RuntimeError('Executor output should not change artifact type.')
if has_multiple_artifacts:
# If there are multiple artifacts in the executor output, their URIs should
# be a direct sub-dir of the system generated URI.
if os.path.dirname(new_artifact.uri) != original_artifact.uri:
raise RuntimeError(
'When there are multiple artifacts to publish, their URIs '
'should be direct sub-directories of the URI of the system generated '
'artifact.')
else:
# If there is only one output artifact, its URI should not be changed
if new_artifact.uri != original_artifact.uri:
# TODO(b/175426744): Data Binder will modify the uri.
logging.warning(
'When there is one artifact to publish, the URI of it should be '
'identical to the URI of system generated artifact.')
def publish_cached_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
) -> None:
"""Marks an existing execution as using cached outputs from a previous execution.
Args:
metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to be associated with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.CACHED
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
input_artifacts=None,
output_artifacts=output_artifacts)
def _set_execution_result_if_not_empty(
executor_output: Optional[execution_result_pb2.ExecutorOutput],
execution: metadata_store_pb2.Execution) -> bool:
"""Sets execution result as a custom property of the execution."""
if executor_output and (executor_output.execution_result.result_message or
executor_output.execution_result.metadata_details or
executor_output.execution_result.code):
# TODO(b/190001754): Consider either switching to base64 encoding or using
# a proto descriptor pool to circumvent TypeError which may be raised when
# converting embedded `Any` protos.
try:
execution_lib.set_execution_result(executor_output.execution_result,
execution)
except TypeError:
logging.exception(
'Skipped setting execution_result as custom property of the '
'execution due to error')
def publish_succeeded_execution(
metadata_handler: metadata.Metadata,
execution_id: int,
contexts: Sequence[metadata_store_pb2.Context],
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> Optional[MutableMapping[str, List[types.Artifact]]]:
"""Marks an existing execution as success.
Also publishes the output artifacts produced by the execution. This method
will also merge the executor produced info into system generated output
  artifacts. The `last_known_state` of the execution will be changed to
`COMPLETE` and the output artifacts will be marked as `LIVE`.
Args:
metadata_handler: A handler to access MLMD.
execution_id: The id of the execution to mark successful.
    contexts: MLMD contexts to be associated with the execution.
output_artifacts: Output artifacts skeleton of the execution, generated by
the system. Each artifact will be linked with the execution through an
event with type OUTPUT.
executor_output: Executor outputs. `executor_output.output_artifacts` will
be used to update system-generated output artifacts passed in through
      `output_artifacts` arg. There are three constraints to the update: 1. The
keys in `executor_output.output_artifacts` are expected to be a subset
of the system-generated output artifacts dict. 2. An update to a certain
      key should contain all the artifacts under that key. 3. An update to an
artifact should not change the type of the artifact.
Returns:
    The possibly updated output_artifacts; note that only outputs whose keys are
    in executor_output will be updated and others will be left untouched. That
    said, it can be partially updated.
Raises:
RuntimeError: if the executor output to a output channel is partial.
"""
output_artifacts = copy.deepcopy(output_artifacts) or {}
output_artifacts = cast(MutableMapping[str, List[types.Artifact]],
output_artifacts)
if executor_output:
if not set(executor_output.output_artifacts.keys()).issubset(
output_artifacts.keys()):
raise RuntimeError(
'Executor output %s contains more keys than output skeleton %s.' %
(executor_output, output_artifacts))
for key, artifact_list in output_artifacts.items():
if key not in executor_output.output_artifacts:
continue
updated_artifact_list = executor_output.output_artifacts[key].artifacts
# We assume the original output dict must include at least one output
# artifact and all artifacts in the list share the same type.
original_artifact = artifact_list[0]
# Update the artifact list with what's in the executor output
artifact_list.clear()
# TODO(b/175426744): revisit this:
      # 1) Whether multiple outputs are needed or not after TFX components
      #    are upgraded.
      # 2) If multiple outputs are needed and this is a common practice, should
      #    we use the driver to create the list of output artifacts instead of
      #    letting the executor create them.
for proto_artifact in updated_artifact_list:
_check_validity(proto_artifact, original_artifact,
len(updated_artifact_list) > 1)
python_artifact = types.Artifact(original_artifact.artifact_type)
python_artifact.set_mlmd_artifact(proto_artifact)
artifact_list.append(python_artifact)
# Marks output artifacts as LIVE.
for artifact_list in output_artifacts.values():
for artifact in artifact_list:
artifact.mlmd_artifact.state = metadata_store_pb2.Artifact.LIVE
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
_set_execution_result_if_not_empty(executor_output, execution)
execution_lib.put_execution(
metadata_handler, execution, contexts, output_artifacts=output_artifacts)
return output_artifacts
def publish_failed_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> None:
"""Marks an existing execution as failed.
Args:
metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to be associated with the execution.
execution_id: The id of the execution.
executor_output: The output of executor.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.FAILED
_set_execution_result_if_not_empty(executor_output, execution)
execution_lib.put_execution(metadata_handler, execution, contexts)
def publish_internal_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None
) -> None:
"""Marks an exeisting execution as as success and links its output to an INTERNAL_OUTPUT event.
Args:
metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to be associated with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type INTERNAL_OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
output_artifacts=output_artifacts,
output_event_type=metadata_store_pb2.Event.INTERNAL_OUTPUT)
def register_execution(
metadata_handler: metadata.Metadata,
execution_type: metadata_store_pb2.ExecutionType,
contexts: Sequence[metadata_store_pb2.Context],
input_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
exec_properties: Optional[Mapping[str, types.Property]] = None,
) -> metadata_store_pb2.Execution:
"""Registers a new execution in MLMD.
Along with the execution:
- the input artifacts will be linked to the execution.
- the contexts will be linked to both the execution and its input artifacts.
Args:
metadata_handler: A handler to access MLMD.
execution_type: The type of the execution.
    contexts: MLMD contexts to be associated with the execution.
input_artifacts: Input artifacts of the execution. Each artifact will be
linked with the execution through an event.
exec_properties: Execution properties. Will be attached to the execution.
Returns:
An MLMD execution that is registered in MLMD, with id populated.
"""
execution = execution_lib.prepare_execution(
metadata_handler, execution_type, metadata_store_pb2.Execution.RUNNING,
exec_properties)
return execution_lib.put_execution(
metadata_handler, execution, contexts, input_artifacts=input_artifacts)
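# Illustrative call sequence (not part of the original module). It assumes an
# already-connected `metadata.Metadata` handler plus pre-resolved contexts,
# execution type, input artifacts and properties; `run_component` is a
# hypothetical stand-in for the actual executor invocation.
#
# with metadata.Metadata(connection_config) as metadata_handler:
#   execution = register_execution(
#       metadata_handler, execution_type, contexts,
#       input_artifacts=input_artifacts, exec_properties=exec_properties)
#   try:
#     executor_output = run_component(input_artifacts, output_artifacts)
#     publish_succeeded_execution(
#         metadata_handler, execution.id, contexts, output_artifacts,
#         executor_output)
#   except Exception:
#     publish_failed_execution(metadata_handler, contexts, execution.id)
#     raise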
|
[
"tfx.orchestration.portable.mlmd.execution_lib.prepare_execution",
"absl.logging.exception",
"tfx.orchestration.portable.mlmd.execution_lib.put_execution",
"absl.logging.warning",
"os.path.dirname",
"tfx.orchestration.portable.mlmd.execution_lib.set_execution_result",
"tfx.types.Artifact",
"copy.deepcopy",
"typing.cast"
] |
[((3017, 3144), 'tfx.orchestration.portable.mlmd.execution_lib.put_execution', 'execution_lib.put_execution', (['metadata_handler', 'execution', 'contexts'], {'input_artifacts': 'None', 'output_artifacts': 'output_artifacts'}), '(metadata_handler, execution, contexts,\n input_artifacts=None, output_artifacts=output_artifacts)\n', (3044, 3144), False, 'from tfx.orchestration.portable.mlmd import execution_lib\n'), ((6137, 6202), 'typing.cast', 'cast', (['MutableMapping[str, List[types.Artifact]]', 'output_artifacts'], {}), '(MutableMapping[str, List[types.Artifact]], output_artifacts)\n', (6141, 6202), False, 'from typing import List, Mapping, MutableMapping, Optional, Sequence, cast\n'), ((8078, 8183), 'tfx.orchestration.portable.mlmd.execution_lib.put_execution', 'execution_lib.put_execution', (['metadata_handler', 'execution', 'contexts'], {'output_artifacts': 'output_artifacts'}), '(metadata_handler, execution, contexts,\n output_artifacts=output_artifacts)\n', (8105, 8183), False, 'from tfx.orchestration.portable.mlmd import execution_lib\n'), ((8915, 8981), 'tfx.orchestration.portable.mlmd.execution_lib.put_execution', 'execution_lib.put_execution', (['metadata_handler', 'execution', 'contexts'], {}), '(metadata_handler, execution, contexts)\n', (8942, 8981), False, 'from tfx.orchestration.portable.mlmd import execution_lib\n'), ((9843, 10013), 'tfx.orchestration.portable.mlmd.execution_lib.put_execution', 'execution_lib.put_execution', (['metadata_handler', 'execution', 'contexts'], {'output_artifacts': 'output_artifacts', 'output_event_type': 'metadata_store_pb2.Event.INTERNAL_OUTPUT'}), '(metadata_handler, execution, contexts,\n output_artifacts=output_artifacts, output_event_type=metadata_store_pb2\n .Event.INTERNAL_OUTPUT)\n', (9870, 10013), False, 'from tfx.orchestration.portable.mlmd import execution_lib\n'), ((11121, 11245), 'tfx.orchestration.portable.mlmd.execution_lib.prepare_execution', 'execution_lib.prepare_execution', (['metadata_handler', 'execution_type', 'metadata_store_pb2.Execution.RUNNING', 'exec_properties'], {}), '(metadata_handler, execution_type,\n metadata_store_pb2.Execution.RUNNING, exec_properties)\n', (11152, 11245), False, 'from tfx.orchestration.portable.mlmd import execution_lib\n'), ((11264, 11367), 'tfx.orchestration.portable.mlmd.execution_lib.put_execution', 'execution_lib.put_execution', (['metadata_handler', 'execution', 'contexts'], {'input_artifacts': 'input_artifacts'}), '(metadata_handler, execution, contexts,\n input_artifacts=input_artifacts)\n', (11291, 11367), False, 'from tfx.orchestration.portable.mlmd import execution_lib\n'), ((6078, 6109), 'copy.deepcopy', 'copy.deepcopy', (['output_artifacts'], {}), '(output_artifacts)\n', (6091, 6109), False, 'import copy\n'), ((1567, 1600), 'os.path.dirname', 'os.path.dirname', (['new_artifact.uri'], {}), '(new_artifact.uri)\n', (1582, 1600), False, 'import os\n'), ((2025, 2167), 'absl.logging.warning', 'logging.warning', (['"""When there is one artifact to publish, the URI of it should be identical to the URI of system generated artifact."""'], {}), "(\n 'When there is one artifact to publish, the URI of it should be identical to the URI of system generated artifact.'\n )\n", (2040, 2167), False, 'from absl import logging\n'), ((3840, 3919), 'tfx.orchestration.portable.mlmd.execution_lib.set_execution_result', 'execution_lib.set_execution_result', (['executor_output.execution_result', 'execution'], {}), '(executor_output.execution_result, execution)\n', (3874, 3919), False, 'from tfx.orchestration.portable.mlmd import execution_lib\n'), ((3989, 4101), 'absl.logging.exception', 'logging.exception', (['"""Skipped setting execution_result as custom property of the execution due to error"""'], {}), "(\n 'Skipped setting execution_result as custom property of the execution due to error'\n )\n", (4006, 4101), False, 'from absl import logging\n'), ((7520, 7567), 'tfx.types.Artifact', 'types.Artifact', (['original_artifact.artifact_type'], {}), '(original_artifact.artifact_type)\n', (7534, 7567), False, 'from tfx import types\n')]
|
# Copyright 2020 Google Research
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
'''
Imported from: https://github.com/google-research/sam
'''
import torch
class SAM(torch.optim.Optimizer):
def __init__(self, params, base_optimizer, rho=0.05, adaptive=True, **kwargs):
assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}"
self.base_optimizer = base_optimizer
defaults = dict(rho=rho, adaptive=adaptive, **self.base_optimizer.defaults)
super().__init__(params, defaults)
self.rho = rho
self.adaptive = adaptive
self.param_groups = self.base_optimizer.param_groups
@torch.no_grad()
def first_step(self, zero_grad=False):
if self._has_overflow(self.param_groups):
if zero_grad: self.zero_grad()
return True
grad_norm = self._grad_norm()
for group in self.param_groups:
scale = self.rho / (grad_norm + 1e-12)
for p in group["params"]:
if p.grad is None: continue
self.state[p]["old_p"] = p.data.clone()
e_w = (torch.pow(p, 2) if self.adaptive else 1.0) * p.grad * scale.to(p)
p.add_(e_w) # climb to the local maximum "w + e(w)"
if zero_grad: self.zero_grad()
return False
@torch.no_grad()
def second_step(self, zero_grad=False):
if self._has_overflow(self.param_groups):
if zero_grad: self.zero_grad()
return
for group in self.param_groups:
for p in group["params"]:
if p.grad is None: continue
p.data = self.state[p]["old_p"] # get back to "w" from "w + e(w)"
self.base_optimizer.step() # do the actual "sharpness-aware" update
if zero_grad: self.zero_grad()
@torch.no_grad()
def step(self):
raise NotImplementedError("SAM doesn't work like the other optimizers,"
" you should first call `first_step` and the `second_step`;")
def _grad_norm(self):
shared_device = self.param_groups[0]["params"][0].device # put everything on the same device, in case of model parallelism
norm = torch.norm(
torch.stack([
((torch.abs(p) if self.adaptive else 1.0) * p.grad).norm(p=2).to(shared_device)
for group in self.param_groups for p in group["params"]
if p.grad is not None
]),
p=2
)
return norm
@staticmethod
def _has_overflow(params):
''' Check whether the gradient overflow occurred in model parameters '''
def _has_inf_or_nan(x):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent version of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
for group in params:
for p in group["params"]:
if p.grad is not None and _has_inf_or_nan(p.grad.data):
return True
return False
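if __name__ == "__main__":
    # Illustrative two-step usage (not part of the original module): SAM.step() is
    # intentionally unsupported, so one iteration calls first_step() after the first
    # backward pass and second_step() after recomputing the loss at "w + e(w)".
    # The tiny model and random data below are placeholders for demonstration only.
    model = torch.nn.Linear(4, 1)
    criterion = torch.nn.MSELoss()
    base = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    optimizer = SAM(model.parameters(), base, rho=0.05, adaptive=True)
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    criterion(model(x), y).backward()
    overflow = optimizer.first_step(zero_grad=True)  # climb to w + e(w)
    if not overflow:
        criterion(model(x), y).backward()  # gradient at the perturbed point
        optimizer.second_step(zero_grad=True)  # restore w and apply base update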
|
[
"torch.no_grad",
"torch.abs",
"torch.pow"
] |
[((722, 737), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (735, 737), False, 'import torch\n'), ((1392, 1407), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1405, 1407), False, 'import torch\n'), ((1894, 1909), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1907, 1909), False, 'import torch\n'), ((1190, 1205), 'torch.pow', 'torch.pow', (['p', '(2)'], {}), '(p, 2)\n', (1199, 1205), False, 'import torch\n'), ((2353, 2365), 'torch.abs', 'torch.abs', (['p'], {}), '(p)\n', (2362, 2365), False, 'import torch\n')]
|
import os
from pathlib import Path
import requests
import shutil
import sys
from distutils.version import LooseVersion
import time
from tqdm import tqdm
from docly.parser import parser as py_parser
from docly.tokenizers import tokenize_code_string
from docly import __version__
# from c2nl.objects import Code
UPDATE_CHECK_URL = "http://3.80.2.138:8584/vercheck/check-version/"
# UPDATE_CHECK_URL = "http://127.0.0.1:5000/vercheck/check-version/"
interaction_cache = lambda : Path(Path.home() / ".docly" / "interaction_cache")
CACHE_DIR = (Path().home() / ".docly" / "file_cache")
cache_exists = lambda : CACHE_DIR.exists()
make_cache_dir = lambda : os.mkdir(str(CACHE_DIR))
def _compare_installed_version_with_latest(v1, v2):
try:
current_version = LooseVersion(v1)
latest_version = LooseVersion(v2)
assert current_version == latest_version
return True
except AssertionError:
return False
def look_for_update():
with requests.sessions.Session() as s:
try:
r = s.get(UPDATE_CHECK_URL, timeout=2)
r.raise_for_status()
if not _compare_installed_version_with_latest(__version__, r.text):
i_c = interaction_cache()
return True
return False
except Exception:
i_c = interaction_cache()
if not i_c.exists():
os.mkdir(i_c)
if not (i_c / "icache.txt").exists():
with open((i_c / "icache.txt"), "w") as f:
f.write(str(int(time.time())) + "\n")
else:
with open((i_c / "icache.txt"), "a") as f:
f.write(str(int(time.time())) + "\n")
return False
def is_dir(base_path):
if isinstance(base_path, Path):
return base_path.is_dir()
elif isinstance(base_path, str):
return Path(base_path).is_dir()
else:
return False
def is_python_file(file_path):
if isinstance(file_path, Path):
return file_path.suffix == ".py"
elif isinstance(file_path, str):
return Path(file_path).suffix == ".py"
else:
return False
def is_ipynb_notebook(file_path):
if isinstance(file_path, Path):
return file_path.suffix == ".ipynb"
elif isinstance(file_path, str):
return Path(file_path).suffix == ".ipynb"
else:
return False
def download_from_url(url, dst):
"""
@param: url to download file
@param: dst place to put the file
"""
file_size = int(requests.head(url).headers["Content-Length"])
if os.path.exists(dst):
first_byte = os.path.getsize(dst)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
pbar = tqdm(
total=file_size, initial=first_byte,
unit='B', unit_scale=True, desc=dst.split('/')[-1])
req = requests.get(url, headers=header, stream=True)
with(open(dst, 'ab')) as f:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
return file_size
def check_out_path(target_path: Path):
""""
This function recursively yields all contents of a pathlib.Path object
"""
yield target_path
for file in target_path.iterdir():
if file.is_dir():
yield from check_out_path(file)
else:
yield file.absolute()
def process_file(file_path: Path, ts_lib_path: str, use_old=False):
result, parser_obj = py_parser.parse(file_path, ts_lib_path)
func_and_params = parser_obj.get_all_function_names_with_params()
if result:
for func_name, data in py_parser.get_func_body_and_docstr(parser_obj):
# print(py_toeknizer.tokenize_code_string(func_body))
# code.tokens = tokenizer.tokenize(func_body).data
# code.text = func_body
(func_body, docstr), start, end = data
ret_start = (start[0]+1, start[1])
params = func_and_params[func_name]
code_str = [tokenize_code_string(func_body)] if use_old else func_body
yield code_str, params, ret_start, func_name, docstr.strip()
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes", "no", or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '{}}'".format(default))
while True:
print(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
print("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
|
[
"os.path.exists",
"requests.sessions.Session",
"os.path.getsize",
"docly.parser.parser.get_func_body_and_docstr",
"pathlib.Path",
"pathlib.Path.home",
"requests.get",
"requests.head",
"docly.tokenizers.tokenize_code_string",
"os.mkdir",
"distutils.version.LooseVersion",
"time.time",
"docly.parser.parser.parse"
] |
[((2597, 2616), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (2611, 2616), False, 'import os\n'), ((2946, 2992), 'requests.get', 'requests.get', (['url'], {'headers': 'header', 'stream': '(True)'}), '(url, headers=header, stream=True)\n', (2958, 2992), False, 'import requests\n'), ((3617, 3656), 'docly.parser.parser.parse', 'py_parser.parse', (['file_path', 'ts_lib_path'], {}), '(file_path, ts_lib_path)\n', (3632, 3656), True, 'from docly.parser import parser as py_parser\n'), ((770, 786), 'distutils.version.LooseVersion', 'LooseVersion', (['v1'], {}), '(v1)\n', (782, 786), False, 'from distutils.version import LooseVersion\n'), ((812, 828), 'distutils.version.LooseVersion', 'LooseVersion', (['v2'], {}), '(v2)\n', (824, 828), False, 'from distutils.version import LooseVersion\n'), ((980, 1007), 'requests.sessions.Session', 'requests.sessions.Session', ([], {}), '()\n', (1005, 1007), False, 'import requests\n'), ((2639, 2659), 'os.path.getsize', 'os.path.getsize', (['dst'], {}), '(dst)\n', (2654, 2659), False, 'import os\n'), ((3773, 3819), 'docly.parser.parser.get_func_body_and_docstr', 'py_parser.get_func_body_and_docstr', (['parser_obj'], {}), '(parser_obj)\n', (3807, 3819), True, 'from docly.parser import parser as py_parser\n'), ((485, 496), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (494, 496), False, 'from pathlib import Path\n'), ((545, 551), 'pathlib.Path', 'Path', ([], {}), '()\n', (549, 551), False, 'from pathlib import Path\n'), ((2544, 2562), 'requests.head', 'requests.head', (['url'], {}), '(url)\n', (2557, 2562), False, 'import requests\n'), ((1399, 1412), 'os.mkdir', 'os.mkdir', (['i_c'], {}), '(i_c)\n', (1407, 1412), False, 'import os\n'), ((1887, 1902), 'pathlib.Path', 'Path', (['base_path'], {}), '(base_path)\n', (1891, 1902), False, 'from pathlib import Path\n'), ((2105, 2120), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (2109, 2120), False, 'from pathlib import Path\n'), ((2336, 2351), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (2340, 2351), False, 'from pathlib import Path\n'), ((4157, 4188), 'docly.tokenizers.tokenize_code_string', 'tokenize_code_string', (['func_body'], {}), '(func_body)\n', (4177, 4188), False, 'from docly.tokenizers import tokenize_code_string\n'), ((1558, 1569), 'time.time', 'time.time', ([], {}), '()\n', (1567, 1569), False, 'import time\n'), ((1693, 1704), 'time.time', 'time.time', ([], {}), '()\n', (1702, 1704), False, 'import time\n')]
|
#importing necessary modules
from sklearn.linear_model import Perceptron
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import numpy as np
# Data and labels
Xtrain = [[182, 80, 34], [176, 70, 33], [161, 60, 28], [154, 55, 27], [166, 63, 30], [189, 90, 36], [175, 63, 28], [177, 71, 30], [159, 52, 27], [171, 72, 32], [181, 85, 34]]
Ytrain = ['male', 'male', 'female', 'female', 'male', 'male', 'female', 'female', 'female', 'male', 'male']
Xval = [[163, 62, 28], [182, 80, 35], [150, 50, 24], [160, 57, 27], [175, 62, 30], [183, 67, 32], [177, 64, 29], [164, 62, 29], [157, 53, 23], [170, 73, 32], [169, 59, 29]]
Yval = ['female', 'male', 'female', 'female', 'male', 'male', 'female', 'female',
'female', 'male', 'female']
# initializing the ML models
knn = KNeighborsClassifier()
perceptron = Perceptron()
# Fitting the models
knn.fit(Xtrain, Ytrain)
perceptron.fit(Xtrain, Ytrain)
# Testing using our input data
pred_knn = knn.predict(Xval)
acc_knn = accuracy_score(Yval, pred_knn) * 100
print(f'Accuracy for knn: {acc_knn}')
pred_perceptron = perceptron.predict(Xval)
acc_perceptron = accuracy_score(Yval, pred_perceptron) * 100
print(f'Accuracy for perceptron: {acc_perceptron}')
# The best classifier out of the two models
index = np.argmax([acc_knn, acc_perceptron])
#argmax function assigns the index of the maximum value to the variable
classifiers = {0: 'KNN', 1:'PER'}
print(f'Best gender classifier is {classifiers[index]}')
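# Illustrative follow-up (not part of the original script): classify a new,
# made-up [height, weight, shoe size] sample with both fitted models.
new_sample = [[172, 68, 31]]
print(f'KNN prediction: {knn.predict(new_sample)[0]}')
print(f'Perceptron prediction: {perceptron.predict(new_sample)[0]}')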
|
[
"sklearn.metrics.accuracy_score",
"sklearn.linear_model.Perceptron",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.argmax"
] |
[((815, 837), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (835, 837), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((851, 863), 'sklearn.linear_model.Perceptron', 'Perceptron', ([], {}), '()\n', (861, 863), False, 'from sklearn.linear_model import Perceptron\n'), ((1297, 1333), 'numpy.argmax', 'np.argmax', (['[acc_knn, acc_perceptron]'], {}), '([acc_knn, acc_perceptron])\n', (1306, 1333), True, 'import numpy as np\n'), ((1012, 1042), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Yval', 'pred_knn'], {}), '(Yval, pred_knn)\n', (1026, 1042), False, 'from sklearn.metrics import accuracy_score\n'), ((1148, 1185), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Yval', 'pred_perceptron'], {}), '(Yval, pred_perceptron)\n', (1162, 1185), False, 'from sklearn.metrics import accuracy_score\n')]
|
# Copyright 2022 Xanadu Quantum Technologies Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions for performing quantum circuit cutting.
"""
import copy
import inspect
import string
import uuid
import warnings
from collections.abc import Sequence as SequenceType
from dataclasses import InitVar, dataclass
from functools import partial
from itertools import compress, product
from pathlib import Path
from typing import Any, Callable, ClassVar, Dict, List, Optional, Sequence, Tuple, Union
from networkx import MultiDiGraph, has_path, weakly_connected_components
import pennylane as qml
from pennylane import apply, expval
from pennylane import numpy as np
from pennylane.grouping import string_to_pauli_word
from pennylane.measurements import Expectation, MeasurementProcess, Sample
from pennylane.operation import Operation, Operator, Tensor
from pennylane.ops.qubit.non_parametric_ops import WireCut
from pennylane.tape import QuantumTape
from pennylane.wires import Wires
from .batch_transform import batch_transform
class MeasureNode(Operation):
"""Placeholder node for measurement operations"""
num_wires = 1
grad_method = None
def __init__(self, *params, wires=None, do_queue=True, id=None):
id = id or str(uuid.uuid4())
super().__init__(*params, wires=wires, do_queue=do_queue, id=id)
class PrepareNode(Operation):
"""Placeholder node for state preparations"""
num_wires = 1
grad_method = None
def __init__(self, *params, wires=None, do_queue=True, id=None):
id = id or str(uuid.uuid4())
super().__init__(*params, wires=wires, do_queue=do_queue, id=id)
def replace_wire_cut_node(node: WireCut, graph: MultiDiGraph):
"""
Replace a :class:`~.WireCut` node in the graph with a :class:`~.MeasureNode`
and :class:`~.PrepareNode`.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
node (WireCut): the :class:`~.WireCut` node to be replaced with a :class:`~.MeasureNode`
and :class:`~.PrepareNode`
graph (nx.MultiDiGraph): the graph containing the node to be replaced
**Example**
Consider the following circuit with a manually-placed wire cut:
.. code-block:: python
wire_cut = qml.WireCut(wires=0)
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut)
qml.RY(0.5, wires=0)
qml.expval(qml.PauliZ(0))
We can find the circuit graph and remove the wire cut node using:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_node(wire_cut, graph)
"""
predecessors = graph.pred[node]
successors = graph.succ[node]
predecessor_on_wire = {}
for op, data in predecessors.items():
for d in data.values():
wire = d["wire"]
predecessor_on_wire[wire] = op
successor_on_wire = {}
for op, data in successors.items():
for d in data.values():
wire = d["wire"]
successor_on_wire[wire] = op
order = graph.nodes[node]["order"]
graph.remove_node(node)
for wire in node.wires:
predecessor = predecessor_on_wire.get(wire, None)
successor = successor_on_wire.get(wire, None)
meas = MeasureNode(wires=wire)
prep = PrepareNode(wires=wire)
# We are introducing a degeneracy in the order of the measure and prepare nodes
# here but the order can be inferred as MeasureNode always precedes
# the corresponding PrepareNode
graph.add_node(meas, order=order)
graph.add_node(prep, order=order)
graph.add_edge(meas, prep, wire=wire)
if predecessor is not None:
graph.add_edge(predecessor, meas, wire=wire)
if successor is not None:
graph.add_edge(prep, successor, wire=wire)
def replace_wire_cut_nodes(graph: MultiDiGraph):
"""
Replace each :class:`~.WireCut` node in the graph with a
:class:`~.MeasureNode` and :class:`~.PrepareNode`.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
graph (nx.MultiDiGraph): The graph containing the :class:`~.WireCut` nodes
to be replaced
**Example**
Consider the following circuit with manually-placed wire cuts:
.. code-block:: python
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the circuit graph and remove all the wire cut nodes using:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
"""
for op in list(graph.nodes):
if isinstance(op, WireCut):
replace_wire_cut_node(op, graph)
def _add_operator_node(graph: MultiDiGraph, op: Operator, order: int, wire_latest_node: dict):
"""
Helper function to add operators as nodes during tape to graph conversion.
"""
graph.add_node(op, order=order)
for wire in op.wires:
if wire_latest_node[wire] is not None:
parent_op = wire_latest_node[wire]
graph.add_edge(parent_op, op, wire=wire)
wire_latest_node[wire] = op
def tape_to_graph(tape: QuantumTape) -> MultiDiGraph:
"""
Converts a quantum tape to a directed multigraph.
.. note::
This operation is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): tape to be converted into a directed multigraph
Returns:
nx.MultiDiGraph: a directed multigraph that captures the circuit structure
of the input tape
**Example**
Consider the following tape:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.RY(0.9, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(1))
Its corresponding circuit graph can be found using
>>> qml.transforms.qcut.tape_to_graph(tape)
<networkx.classes.multidigraph.MultiDiGraph at 0x7fe41cbd7210>
"""
graph = MultiDiGraph()
wire_latest_node = {w: None for w in tape.wires}
for order, op in enumerate(tape.operations):
_add_operator_node(graph, op, order, wire_latest_node)
order += 1 # pylint: disable=undefined-loop-variable
for m in tape.measurements:
obs = getattr(m, "obs", None)
if obs is not None and isinstance(obs, Tensor):
if m.return_type is Sample:
raise ValueError(
"Sampling from tensor products of observables "
"is not supported in circuit cutting"
)
for o in obs.obs:
m_ = MeasurementProcess(m.return_type, obs=o)
_add_operator_node(graph, m_, order, wire_latest_node)
elif m.return_type is Sample and obs is None:
for w in m.wires:
s_ = qml.sample(qml.Projector([1], wires=w))
_add_operator_node(graph, s_, order, wire_latest_node)
else:
_add_operator_node(graph, m, order, wire_latest_node)
order += 1
return graph
# pylint: disable=too-many-branches
def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a graph into a collection of subgraphs as well as returning
the communication (`quotient <https://en.wikipedia.org/wiki/Quotient_graph>`__)
graph.
The input ``graph`` is fragmented by disconnecting each :class:`~.MeasureNode` and
:class:`~.PrepareNode` pair and finding the resultant disconnected subgraph fragments.
Each node of the communication graph represents a subgraph fragment and the edges
denote the flow of qubits between fragments due to the removed :class:`~.MeasureNode` and
:class:`~.PrepareNode` pairs.
.. note::
This operation is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
graph (nx.MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
Tuple[Tuple[nx.MultiDiGraph], nx.MultiDiGraph]: the subgraphs of the cut graph
and the communication graph.
**Example**
Consider the following circuit with manually-placed wire cuts:
.. code-block:: python
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
>>> qml.transforms.qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
graph_copy = graph.copy()
cut_edges = []
measure_nodes = [n for n in graph.nodes if isinstance(n, MeasurementProcess)]
for node1, node2, wire_key in graph.edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2, wire_key))
graph_copy.remove_edge(node1, node2, key=wire_key)
subgraph_nodes = weakly_connected_components(graph_copy)
subgraphs = tuple(MultiDiGraph(graph_copy.subgraph(n)) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2, _ in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
if start_fragment != end_fragment:
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
else:
# The MeasureNode and PrepareNode pair live in the same fragment and did not result
# in a disconnection. We can therefore remove these nodes. Note that we do not need
# to worry about adding back an edge between the predecessor to node1 and the successor
# to node2 because our next step is to convert the fragment circuit graphs to tapes,
# a process that does not depend on edge connections in the subgraph.
subgraphs[start_fragment].remove_node(node1)
subgraphs[end_fragment].remove_node(node2)
terminal_indices = [i for i, s in enumerate(subgraphs) for n in measure_nodes if s.has_node(n)]
subgraphs_connected_to_measurements = []
subgraphs_indices_to_remove = []
prepare_nodes_removed = []
for i, s in enumerate(subgraphs):
if any(has_path(communication_graph, i, t) for t in terminal_indices):
subgraphs_connected_to_measurements.append(s)
else:
subgraphs_indices_to_remove.append(i)
prepare_nodes_removed.extend([n for n in s.nodes if isinstance(n, PrepareNode)])
measure_nodes_to_remove = [
m for p in prepare_nodes_removed for m, p_, _ in cut_edges if p is p_
]
communication_graph.remove_nodes_from(subgraphs_indices_to_remove)
for m in measure_nodes_to_remove:
for s in subgraphs_connected_to_measurements:
if s.has_node(m):
s.remove_node(m)
return subgraphs_connected_to_measurements, communication_graph
def _find_new_wire(wires: Wires) -> int:
"""Finds a new wire label that is not in ``wires``."""
ctr = 0
while ctr in wires:
ctr += 1
return ctr
# pylint: disable=protected-access
def graph_to_tape(graph: MultiDiGraph) -> QuantumTape:
"""
Converts a directed multigraph to the corresponding :class:`~.QuantumTape`.
To account for the possibility of needing to perform mid-circuit measurements, if any operations
follow a :class:`MeasureNode` operation on a given wire then these operations are mapped to a
new wire.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
graph (nx.MultiDiGraph): directed multigraph to be converted to a tape
Returns:
QuantumTape: the quantum tape corresponding to the input graph
**Example**
Consider the following circuit:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.RY(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.transforms.qcut.MeasureNode(wires=1)
qml.transforms.qcut.PrepareNode(wires=1)
qml.CNOT(wires=[1, 0])
qml.expval(qml.PauliZ(0))
This circuit contains operations that follow a :class:`~.MeasureNode`. These operations will
subsequently act on wire ``2`` instead of wire ``1``:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> tape = qml.transforms.qcut.graph_to_tape(graph)
>>> print(tape.draw())
0: ──RX(0.4)──────╭C───────────────╭X──┤ ⟨Z⟩
1: ──RY(0.5)──────╰X──MeasureNode──│───┤
2: ──PrepareNode───────────────────╰C──┤
"""
wires = Wires.all_wires([n.wires for n in graph.nodes])
ordered_ops = sorted(
[(order, op) for op, order in graph.nodes(data="order")], key=lambda x: x[0]
)
wire_map = {w: w for w in wires}
reverse_wire_map = {v: k for k, v in wire_map.items()}
copy_ops = [copy.copy(op) for _, op in ordered_ops if not isinstance(op, MeasurementProcess)]
copy_meas = [copy.copy(op) for _, op in ordered_ops if isinstance(op, MeasurementProcess)]
observables = []
with QuantumTape() as tape:
for op in copy_ops:
new_wires = Wires([wire_map[w] for w in op.wires])
# TODO: find a better way to update operation wires
op._wires = new_wires
apply(op)
if isinstance(op, MeasureNode):
assert len(op.wires) == 1
measured_wire = op.wires[0]
new_wire = _find_new_wire(wires)
wires += new_wire
original_wire = reverse_wire_map[measured_wire]
wire_map[original_wire] = new_wire
reverse_wire_map[new_wire] = original_wire
if copy_meas:
return_types = set(meas.return_type for meas in copy_meas)
if len(return_types) > 1:
raise ValueError(
"Only a single return type can be used for measurement "
"nodes in graph_to_tape"
)
return_type = return_types.pop()
if return_type not in {Sample, Expectation}:
raise ValueError(
"Invalid return type. Only expectation value and sampling measurements "
"are supported in graph_to_tape"
)
for meas in copy_meas:
obs = meas.obs
obs._wires = Wires([wire_map[w] for w in obs.wires])
observables.append(obs)
if return_type is Sample:
apply(meas)
if return_type is Expectation:
if len(observables) > 1:
qml.expval(Tensor(*observables))
else:
qml.expval(obs)
return tape
def _get_measurements(
group: Sequence[Operator], measurements: Sequence[MeasurementProcess]
) -> List[MeasurementProcess]:
"""Pairs each observable in ``group`` with the circuit ``measurements``.
Only a single measurement of an expectation value is currently supported
in ``measurements``.
Args:
group (Sequence[Operator]): a collection of observables
measurements (Sequence[MeasurementProcess]): measurements from the circuit
Returns:
List[MeasurementProcess]: the expectation values of ``g @ obs``, where ``g`` is iterated
over ``group`` and ``obs`` is the observable composing the single measurement
in ``measurements``
"""
if len(group) == 0:
# This ensures the measurements of the original tape are carried over to the
# following tape configurations in the absence of any MeasureNodes in the fragment
return measurements
n_measurements = len(measurements)
if n_measurements > 1:
raise ValueError(
"The circuit cutting workflow only supports circuits with a single output "
"measurement"
)
if n_measurements == 0:
return [expval(g) for g in group]
measurement = measurements[0]
if measurement.return_type is not Expectation:
raise ValueError(
"The circuit cutting workflow only supports circuits with expectation "
"value measurements"
)
obs = measurement.obs
return [expval(copy.copy(obs) @ g) for g in group]
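# Illustrative example (not part of the original module): pairing a group of
# observables with a single expectation-value measurement composes each group
# member into a tensor-product expectation value, e.g.
#   group = [qml.PauliY(1)]; measurements = [qml.expval(qml.PauliZ(0))]
#   _get_measurements(group, measurements)  ->  [expval(PauliZ(0) @ PauliY(1))]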
def _prep_zero_state(wire):
qml.Identity(wire)
def _prep_one_state(wire):
qml.PauliX(wire)
def _prep_plus_state(wire):
qml.Hadamard(wire)
def _prep_minus_state(wire):
qml.PauliX(wire)
qml.Hadamard(wire)
def _prep_iplus_state(wire):
qml.Hadamard(wire)
qml.S(wires=wire)
def _prep_iminus_state(wire):
qml.PauliX(wire)
qml.Hadamard(wire)
qml.S(wires=wire)
PREPARE_SETTINGS = [_prep_zero_state, _prep_one_state, _prep_plus_state, _prep_iplus_state]
def expand_fragment_tape(
tape: QuantumTape,
) -> Tuple[List[QuantumTape], List[PrepareNode], List[MeasureNode]]:
"""
Expands a fragment tape into a sequence of tapes for each configuration of the contained
:class:`MeasureNode` and :class:`PrepareNode` operations.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): the fragment tape containing :class:`MeasureNode` and
:class:`PrepareNode` operations to be expanded
Returns:
Tuple[List[QuantumTape], List[PrepareNode], List[MeasureNode]]: the
tapes corresponding to each configuration and the order of preparation nodes and
measurement nodes used in the expansion
**Example**
Consider the following circuit, which contains a :class:`~.MeasureNode` and
:class:`~.PrepareNode` operation:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.transforms.qcut.PrepareNode(wires=0)
qml.RX(0.5, wires=0)
qml.transforms.qcut.MeasureNode(wires=0)
We can expand over the measurement and preparation nodes using:
>>> tapes, prep, meas = qml.transforms.qcut.expand_fragment_tape(tape)
>>> for t in tapes:
... print(qml.drawer.tape_text(t, decimals=1))
0: ──I──RX(0.5)─┤ <I> <Z>
0: ──I──RX(0.5)─┤ <X>
0: ──I──RX(0.5)─┤ <Y>
0: ──X──RX(0.5)─┤ <I> <Z>
0: ──X──RX(0.5)─┤ <X>
0: ──X──RX(0.5)─┤ <Y>
0: ──H──RX(0.5)─┤ <I> <Z>
0: ──H──RX(0.5)─┤ <X>
0: ──H──RX(0.5)─┤ <Y>
0: ──H──S──RX(0.5)─┤ <I> <Z>
0: ──H──S──RX(0.5)─┤ <X>
0: ──H──S──RX(0.5)─┤ <Y>
"""
prepare_nodes = [o for o in tape.operations if isinstance(o, PrepareNode)]
measure_nodes = [o for o in tape.operations if isinstance(o, MeasureNode)]
wire_map = {mn.wires[0]: i for i, mn in enumerate(measure_nodes)}
n_meas = len(measure_nodes)
if n_meas >= 1:
measure_combinations = qml.grouping.partition_pauli_group(len(measure_nodes))
else:
measure_combinations = [[""]]
tapes = []
for prepare_settings in product(range(len(PREPARE_SETTINGS)), repeat=len(prepare_nodes)):
for measure_group in measure_combinations:
if n_meas >= 1:
group = [
string_to_pauli_word(paulis, wire_map=wire_map) for paulis in measure_group
]
else:
group = []
prepare_mapping = {
n: PREPARE_SETTINGS[s] for n, s in zip(prepare_nodes, prepare_settings)
}
with QuantumTape() as tape_:
for op in tape.operations:
if isinstance(op, PrepareNode):
w = op.wires[0]
prepare_mapping[op](w)
elif not isinstance(op, MeasureNode):
apply(op)
with qml.tape.stop_recording():
measurements = _get_measurements(group, tape.measurements)
for meas in measurements:
apply(meas)
tapes.append(tape_)
return tapes, prepare_nodes, measure_nodes
MC_STATES = [
_prep_zero_state,
_prep_one_state,
_prep_plus_state,
_prep_minus_state,
_prep_iplus_state,
_prep_iminus_state,
_prep_zero_state,
_prep_one_state,
]
def _identity(wire):
qml.sample(qml.Identity(wires=wire))
def _pauliX(wire):
qml.sample(qml.PauliX(wires=wire))
def _pauliY(wire):
qml.sample(qml.PauliY(wires=wire))
def _pauliZ(wire):
qml.sample(qml.PauliZ(wires=wire))
MC_MEASUREMENTS = [
_identity,
_identity,
_pauliX,
_pauliX,
_pauliY,
_pauliY,
_pauliZ,
_pauliZ,
]
def expand_fragment_tapes_mc(
tapes: Sequence[QuantumTape], communication_graph: MultiDiGraph, shots: int
) -> Tuple[List[QuantumTape], np.ndarray]:
"""
Expands fragment tapes into a sequence of random configurations of the contained pairs of
:class:`MeasureNode` and :class:`PrepareNode` operations.
For each pair, a measurement is sampled from
the Pauli basis and a state preparation is sampled from the corresponding pair of eigenstates.
A settings array is also given which tracks the configuration pairs. Since each of the 4
measurements has 2 possible eigenvectors, all configurations can be uniquely identified by
8 values. The number of rows is determined by the number of cuts and the number of columns
is determined by the number of shots.
.. note::
This function is designed for use as part of the sampling-based circuit cutting workflow.
Check out the :func:`~.cut_circuit_mc` transform for more details.
Args:
tapes (Sequence[QuantumTape]): the fragment tapes containing :class:`MeasureNode` and
:class:`PrepareNode` operations to be expanded
communication_graph (nx.MultiDiGraph): the communication (quotient) graph of the fragmented
full graph
shots (int): number of shots
Returns:
Tuple[List[QuantumTape], np.ndarray]: the tapes corresponding to each configuration and the
settings that track each configuration pair
**Example**
Consider the following circuit that contains a sample measurement:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=[1, 2])
qml.sample(wires=[0, 1, 2])
We can generate the fragment tapes using the following workflow:
>>> g = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_nodes(g)
>>> subgraphs, communication_graph = qml.transforms.qcut.fragment_graph(g)
>>> tapes = [qml.transforms.qcut.graph_to_tape(sg) for sg in subgraphs]
We can then expand over the measurement and preparation nodes to generate random
configurations using:
.. code-block:: python
>>> configs, settings = qml.transforms.qcut.expand_fragment_tapes_mc(tapes, communication_graph, 3)
>>> print(settings)
[[1 6 2]]
>>> for i, (c1, c2) in enumerate(zip(configs[0], configs[1])):
... print(f"config {i}:")
... print(c1.draw())
... print("")
... print(c2.draw())
... print("")
...
config 0:
0: ──H─╭C─┤ Sample[|1⟩⟨1|]
1: ────╰X─┤ Sample[I]
1: ──X─╭C─┤ Sample[|1⟩⟨1|]
2: ────╰X─┤ Sample[|1⟩⟨1|]
config 1:
0: ──H─╭C─┤ Sample[|1⟩⟨1|]
1: ────╰X─┤ Sample[Z]
1: ──I─╭C─┤ Sample[|1⟩⟨1|]
2: ────╰X─┤ Sample[|1⟩⟨1|]
config 2:
0: ──H─╭C─┤ Sample[|1⟩⟨1|]
1: ────╰X─┤ Sample[X]
1: ──H─╭C─┤ Sample[|1⟩⟨1|]
2: ────╰X─┤ Sample[|1⟩⟨1|]
"""
pairs = [e[-1] for e in communication_graph.edges.data("pair")]
settings = np.random.choice(range(8), size=(len(pairs), shots), replace=True)
meas_settings = {pair[0].id: setting for pair, setting in zip(pairs, settings)}
prep_settings = {pair[1].id: setting for pair, setting in zip(pairs, settings)}
all_configs = []
for tape in tapes:
frag_config = []
for shot in range(shots):
with qml.tape.QuantumTape() as new_tape:
for op in tape.operations:
w = op.wires[0]
if isinstance(op, PrepareNode):
MC_STATES[prep_settings[op.id][shot]](w)
elif not isinstance(op, MeasureNode):
qml.apply(op)
for meas in tape.measurements:
qml.apply(meas)
for op in tape.operations:
meas_w = op.wires[0]
if isinstance(op, MeasureNode):
MC_MEASUREMENTS[meas_settings[op.id][shot]](meas_w)
frag_config.append(new_tape)
all_configs.append(frag_config)
return all_configs, settings
def _reshape_results(results: Sequence, shots: int) -> List[List]:
"""
Helper function to reshape ``results`` into a two-dimensional nested list whose number of rows
    is determined by the number of shots and whose number of columns is determined by the number of
    circuit fragments.
"""
results = [qml.math.flatten(r) for r in results]
results = [results[i : i + shots] for i in range(0, len(results), shots)]
results = list(map(list, zip(*results))) # calculate list-based transpose
return results
def qcut_processing_fn_sample(
results: Sequence, communication_graph: MultiDiGraph, shots: int
) -> List:
"""
Function to postprocess samples for the :func:`cut_circuit_mc() <pennylane.cut_circuit_mc>`
transform. This removes superfluous mid-circuit measurement samples from fragment
circuit outputs.
.. note::
This function is designed for use as part of the sampling-based circuit cutting workflow.
Check out the :func:`qml.cut_circuit_mc() <pennylane.cut_circuit_mc>` transform for more details.
Args:
results (Sequence): a collection of sample-based execution results generated from the
random expansion of circuit fragments over measurement and preparation node configurations
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between circuit fragments
shots (int): the number of shots
Returns:
List[tensor_like]: the sampled output for all terminal measurements over the number of shots given
"""
res0 = results[0]
results = _reshape_results(results, shots)
out_degrees = [d for _, d in communication_graph.out_degree]
samples = []
for result in results:
sample = []
for fragment_result, out_degree in zip(result, out_degrees):
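            # Drop the trailing ``out_degree`` entries, which are the mid-circuit measurement
            # samples appended by ``expand_fragment_tapes_mc``; ``or None`` keeps the full result
            # when the fragment has no outgoing cut edges (``out_degree == 0``).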
sample.append(fragment_result[: -out_degree or None])
samples.append(np.hstack(sample))
return [qml.math.convert_like(np.array(samples), res0)]
def qcut_processing_fn_mc(
results: Sequence,
communication_graph: MultiDiGraph,
settings: np.ndarray,
shots: int,
classical_processing_fn: callable,
):
"""
Function to postprocess samples for the :func:`cut_circuit_mc() <pennylane.cut_circuit_mc>`
transform. This takes a user-specified classical function to act on bitstrings and
generates an expectation value.
.. note::
This function is designed for use as part of the sampling-based circuit cutting workflow.
Check out the :func:`qml.cut_circuit_mc() <pennylane.cut_circuit_mc>` transform for more details.
Args:
results (Sequence): a collection of sample-based execution results generated from the
random expansion of circuit fragments over measurement and preparation node configurations
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between circuit fragments
settings (np.ndarray): Each element is one of 8 unique values that tracks the specific
measurement and preparation operations over all configurations. The number of rows is determined
by the number of cuts and the number of columns is determined by the number of shots.
shots (int): the number of shots
classical_processing_fn (callable): A classical postprocessing function to be applied to
            the reconstructed bitstrings. The expected input is a bitstring, i.e., a flat array of length
            ``wires``, and the output should be a single number within the interval :math:`[-1, 1]`.
Returns:
float or tensor_like: the expectation value calculated in accordance to Eq. (35) of
`Peng et al. <https://arxiv.org/abs/1904.00102>`__
"""
res0 = results[0]
results = _reshape_results(results, shots)
out_degrees = [d for _, d in communication_graph.out_degree]
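    # Per-setting coefficients c_s (each +/-0.5) for the eight measure/prepare settings,
    # following Eq. (35) of Peng et al.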
evals = (0.5, 0.5, 0.5, -0.5, 0.5, -0.5, 0.5, -0.5)
expvals = []
for result, setting in zip(results, settings.T):
sample_terminal = []
sample_mid = []
for fragment_result, out_degree in zip(result, out_degrees):
sample_terminal.append(fragment_result[: -out_degree or None])
sample_mid.append(fragment_result[-out_degree or len(fragment_result) :])
sample_terminal = np.hstack(sample_terminal)
sample_mid = np.hstack(sample_mid)
assert set(sample_terminal).issubset({np.array(0), np.array(1)})
assert set(sample_mid).issubset({np.array(-1), np.array(1)})
        # following Eq. (35) of Peng et al.: https://arxiv.org/abs/1904.00102
f = classical_processing_fn(sample_terminal)
if not -1 <= f <= 1:
raise ValueError(
"The classical processing function supplied must "
"give output in the interval [-1, 1]"
)
sigma_s = np.prod(sample_mid)
t_s = f * sigma_s
c_s = np.prod([evals[s] for s in setting])
K = len(sample_mid)
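        # Each of the K cuts is sampled uniformly from 8 settings, so the single-shot estimate is
        # weighted by 8**K (compensating the uniform sampling probability of 8**-K).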
expvals.append(8**K * c_s * t_s)
return qml.math.convert_like(np.mean(expvals), res0)
@batch_transform
def cut_circuit_mc(
tape: QuantumTape,
classical_processing_fn: Optional[callable] = None,
auto_cutter: Union[bool, Callable] = False,
max_depth: int = 1,
shots: Optional[int] = None,
device_wires: Optional[Wires] = None,
**kwargs,
) -> Tuple[Tuple[QuantumTape], Callable]:
"""
Cut up a circuit containing sample measurements into smaller fragments using a
Monte Carlo method.
Following the approach of `Peng et al. <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.125.150504>`__,
strategic placement of :class:`~.WireCut` operations can allow a quantum circuit to be split
into disconnected circuit fragments. A circuit containing sample measurements can be cut and
processed using Monte Carlo (MC) methods. This transform employs MC methods to allow for sampled measurement
outcomes to be recombined to full bitstrings and, if a classical processing function is supplied,
an expectation value will be evaluated.
Args:
tape (QuantumTape): the tape of the full circuit to be cut
classical_processing_fn (callable): A classical postprocessing function to be applied to
            the reconstructed bitstrings. The expected input is a bitstring, i.e., a flat array of length ``wires``,
and the output should be a single number within the interval :math:`[-1, 1]`.
If not supplied, the transform will output samples.
auto_cutter (Union[bool, Callable]): Toggle for enabling automatic cutting with the default
:func:`~.kahypar_cut` partition method. Can also pass a graph partitioning function that
takes an input graph and returns a list of edges to be cut based on a given set of
constraints and objective. The default :func:`~.kahypar_cut` function requires KaHyPar to
be installed using ``pip install kahypar`` for Linux and Mac users or visiting the
instructions `here <https://kahypar.org>`__ to compile from source for Windows users.
max_depth (int): The maximum depth used to expand the circuit while searching for wire cuts.
Only applicable when transforming a QNode.
shots (int): Number of shots. When transforming a QNode, this argument is
set by the device's ``shots`` value or at QNode call time (if provided).
Required when transforming a tape.
device_wires (Wires): Wires of the device that the cut circuits are to be run on.
When transforming a QNode, this argument is optional and will be set to the
QNode's device wires. Required when transforming a tape.
kwargs: Additional keyword arguments to be passed to a callable ``auto_cutter`` argument.
For the default KaHyPar cutter, please refer to the docstring of functions
:func:`~.find_and_place_cuts` and :func:`~.kahypar_cut` for the available arguments.
Returns:
Callable: Function which accepts the same arguments as the QNode.
When called, this function will sample from the partitioned circuit fragments
and combine the results using a Monte Carlo method.
**Example**
The following :math:`3`-qubit circuit contains a :class:`~.WireCut` operation and a :func:`~.sample`
measurement. When decorated with ``@qml.cut_circuit_mc``, we can cut the circuit into two
:math:`2`-qubit fragments:
.. code-block:: python
dev = qml.device("default.qubit", wires=2, shots=1000)
@qml.cut_circuit_mc
@qml.qnode(dev)
def circuit(x):
qml.RX(0.89, wires=0)
qml.RY(0.5, wires=1)
qml.RX(1.3, wires=2)
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=[1, 2])
qml.RX(x, wires=0)
qml.RY(0.7, wires=1)
qml.RX(2.3, wires=2)
return qml.sample(wires=[0, 2])
we can then execute the circuit as usual by calling the QNode:
>>> x = 0.3
>>> circuit(x)
tensor([[1, 1],
[0, 1],
[0, 1],
...,
[0, 1],
[0, 1],
[0, 1]], requires_grad=True)
Furthermore, the number of shots can be temporarily altered when calling
the qnode:
>>> results = circuit(x, shots=123)
>>> results.shape
(123, 2)
Alternatively, if the optimal wire-cut placement is unknown for an arbitrary circuit, the
    ``auto_cutter`` option can be enabled to attempt to find such an optimal cut. The
    following example shows this capability on the same circuit as above but with the
:class:`~.WireCut` removed:
.. code-block:: python
@qml.cut_circuit_mc(auto_cutter=True)
@qml.qnode(dev)
def circuit(x):
qml.RX(0.89, wires=0)
qml.RY(0.5, wires=1)
qml.RX(1.3, wires=2)
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.RX(x, wires=0)
qml.RY(0.7, wires=1)
qml.RX(2.3, wires=2)
return qml.sample(wires=[0, 2])
>>> results = circuit(x, shots=123)
>>> results.shape
(123, 2)
.. UsageDetails::
Manually placing :class:`~.WireCut` operations and decorating the QNode with the
``cut_circuit_mc()`` batch transform is the suggested entrypoint into sampling-based
circuit cutting using the Monte Carlo method. However,
advanced users also have the option to work directly with a :class:`~.QuantumTape` and
manipulate the tape to perform circuit cutting using the below functionality:
.. autosummary::
:toctree:
~transforms.qcut.tape_to_graph
~transforms.qcut.find_and_place_cuts
~transforms.qcut.replace_wire_cut_nodes
~transforms.qcut.fragment_graph
~transforms.qcut.graph_to_tape
~transforms.qcut.remap_tape_wires
~transforms.qcut.expand_fragment_tapes_mc
~transforms.qcut.qcut_processing_fn_sample
~transforms.qcut.qcut_processing_fn_mc
The following shows how these elementary steps are combined as part of the
``cut_circuit_mc()`` transform.
Consider the circuit below:
.. code-block:: python
np.random.seed(42)
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=1)
qml.WireCut(wires=1)
qml.CNOT(wires=[1, 2])
qml.sample(wires=[0, 1, 2])
>>> print(tape.draw())
0: ──H─╭C───────────┤ ╭Sample
1: ────╰X──X──//─╭C─┤ ├Sample
2: ──────────────╰X─┤ ╰Sample
To cut the circuit, we first convert it to its graph representation:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
If, however, the optimal location of the :class:`~.WireCut` is unknown, we can use
        :func:`~.find_and_place_cuts` to attempt to automatically find such a cut
given the device constraints. Using the same circuit as above but with the
:class:`~.WireCut` removed, a slightly different cut with identical cost can be discovered
and placed into the circuit with automatic cutting:
.. code-block:: python
with qml.tape.QuantumTape() as uncut_tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=1)
qml.CNOT(wires=[1, 2])
qml.sample(wires=[0, 1, 2])
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
... graph=qml.transforms.qcut.tape_to_graph(uncut_tape),
... cut_strategy=qml.transforms.qcut.CutStrategy(max_free_wires=2),
... )
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──H─╭C───────────┤ Sample[|1⟩⟨1|]
1: ────╰X──//──X─╭C─┤ Sample[|1⟩⟨1|]
2: ──────────────╰X─┤ Sample[|1⟩⟨1|]
Our next step, using the original manual cut placement, is to remove the :class:`~.WireCut`
nodes in the graph and replace with :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs.
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
The :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs are placeholder operations that
allow us to cut the circuit graph and then randomly select measurement and preparation
configurations at cut locations. First, the :func:`~.fragment_graph` function pulls apart
the graph into disconnected components as well as returning the
`communication_graph <https://en.wikipedia.org/wiki/Quotient_graph>`__
detailing the connectivity between the components.
>>> fragments, communication_graph = qml.transforms.qcut.fragment_graph(graph)
We now convert the ``fragments`` back to :class:`~.QuantumTape` objects
>>> fragment_tapes = [qml.transforms.qcut.graph_to_tape(f) for f in fragments]
The circuit fragments can now be visualized:
>>> print(fragment_tapes[0].draw())
0: ──H─╭C─────────────────┤ Sample[|1⟩⟨1|]
1: ────╰X──X──MeasureNode─┤
>>> print(fragment_tapes[1].draw())
1: ──PrepareNode─╭C─┤ Sample[|1⟩⟨1|]
2: ──────────────╰X─┤ Sample[|1⟩⟨1|]
Additionally, we must remap the tape wires to match those available on our device.
>>> dev = qml.device("default.qubit", wires=2, shots=1)
>>> fragment_tapes = [
... qml.transforms.qcut.remap_tape_wires(t, dev.wires) for t in fragment_tapes
... ]
Note that the number of shots on the device is set to :math:`1` here since we
will only require one execution per fragment configuration. In the
following steps we introduce a shots value that will determine the number
of fragment configurations. When using the ``cut_circuit_mc()`` decorator
with a QNode, this shots value is automatically inferred from the provided
device.
Next, each circuit fragment is randomly expanded over :class:`~.MeasureNode` and
:class:`~.PrepareNode` configurations. For each pair, a measurement is sampled from
the Pauli basis and a state preparation is sampled from the corresponding pair of eigenstates.
A settings array is also given which tracks the configuration pairs. Since each of the 4
measurements has 2 possible eigenvectors, all configurations can be uniquely identified by
8 values. The number of rows is determined by the number of cuts and the number of columns
is determined by the number of shots.
>>> shots = 3
>>> configurations, settings = qml.transforms.qcut.expand_fragment_tapes_mc(
... fragment_tapes, communication_graph, shots=shots
... )
>>> tapes = tuple(tape for c in configurations for tape in c)
>>> settings
tensor([[6, 3, 4]], requires_grad=True)
Each configuration is drawn below:
>>> for t in tapes:
... print(t.draw())
... print("")
.. code-block::
0: ──H─╭C────┤ Sample[|1⟩⟨1|]
1: ────╰X──X─┤ Sample[Z]
0: ──H─╭C────┤ Sample[|1⟩⟨1|]
1: ────╰X──X─┤ Sample[X]
0: ──H─╭C────┤ Sample[|1⟩⟨1|]
1: ────╰X──X─┤ Sample[Y]
0: ──I─╭C─┤ Sample[|1⟩⟨1|]
1: ────╰X─┤ Sample[|1⟩⟨1|]
0: ──X──S─╭C─┤ Sample[|1⟩⟨1|]
1: ───────╰X─┤ Sample[|1⟩⟨1|]
0: ──H─╭C─┤ Sample[|1⟩⟨1|]
1: ────╰X─┤ Sample[|1⟩⟨1|]
The last step is to execute the tapes and postprocess the results using
:func:`~.qcut_processing_fn_sample`, which processes the results to approximate the original full circuit
output bitstrings.
>>> results = qml.execute(tapes, dev, gradient_fn=None)
>>> qml.transforms.qcut.qcut_processing_fn_sample(
... results,
... communication_graph,
... shots=shots,
... )
[array([[0., 0., 0.],
[1., 0., 0.],
[1., 0., 0.]])]
Alternatively, it is possible to calculate an expectation value if a classical
processing function is provided that will accept the reconstructed circuit bitstrings
and return a value in the interval :math:`[-1, 1]`:
.. code-block::
def fn(x):
if x[0] == 0:
return 1
if x[0] == 1:
return -1
>>> qml.transforms.qcut.qcut_processing_fn_mc(
... results,
... communication_graph,
... settings,
... shots,
... fn
... )
array(-4.)
        Using the Monte Carlo approach of `Peng et al. <https://arxiv.org/abs/1904.00102>`__, the
        ``cut_circuit_mc`` transform also supports returning sample-based expectation values of
        observables that are diagonal in the computational basis, as shown below for a ``ZZ`` measurement
        on wires ``0`` and ``2``:
.. code-block::
dev = qml.device("default.qubit", wires=2, shots=10000)
def observable(bitstring):
return (-1) ** np.sum(bitstring)
@qml.cut_circuit_mc(classical_processing_fn=observable)
@qml.qnode(dev)
def circuit(x):
qml.RX(0.89, wires=0)
qml.RY(0.5, wires=1)
qml.RX(1.3, wires=2)
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=[1, 2])
qml.RX(x, wires=0)
qml.RY(0.7, wires=1)
qml.RX(2.3, wires=2)
return qml.sample(wires=[0, 2])
We can now approximate the expectation value of the observable using
>>> circuit(x)
tensor(-0.776, requires_grad=True)
"""
# pylint: disable=unused-argument, too-many-arguments
if len(tape.measurements) != 1:
raise ValueError(
"The Monte Carlo circuit cutting workflow only supports circuits "
"with a single output measurement"
)
if not all(m.return_type is Sample for m in tape.measurements):
raise ValueError(
"The Monte Carlo circuit cutting workflow only supports circuits "
"with sampling-based measurements"
)
for meas in tape.measurements:
if meas.obs is not None:
raise ValueError(
"The Monte Carlo circuit cutting workflow only "
"supports measurements in the computational basis. Please only specify "
"wires to be sampled within qml.sample(), do not pass observables."
)
g = tape_to_graph(tape)
if auto_cutter is True or callable(auto_cutter):
cut_strategy = kwargs.pop("cut_strategy", None) or CutStrategy(
max_free_wires=len(device_wires)
)
g = find_and_place_cuts(
graph=g,
cut_method=auto_cutter if callable(auto_cutter) else kahypar_cut,
cut_strategy=cut_strategy,
**kwargs,
)
replace_wire_cut_nodes(g)
fragments, communication_graph = fragment_graph(g)
fragment_tapes = [graph_to_tape(f) for f in fragments]
fragment_tapes = [remap_tape_wires(t, device_wires) for t in fragment_tapes]
configurations, settings = expand_fragment_tapes_mc(
fragment_tapes, communication_graph, shots=shots
)
tapes = tuple(tape for c in configurations for tape in c)
if classical_processing_fn:
return tapes, partial(
qcut_processing_fn_mc,
communication_graph=communication_graph,
settings=settings,
shots=shots,
classical_processing_fn=classical_processing_fn,
)
return tapes, partial(
qcut_processing_fn_sample, communication_graph=communication_graph, shots=shots
)
@cut_circuit_mc.custom_qnode_wrapper
def qnode_execution_wrapper_mc(self, qnode, targs, tkwargs):
"""Here, we overwrite the QNode execution wrapper in order
to replace execution variables"""
transform_max_diff = tkwargs.pop("max_diff", None)
tkwargs.setdefault("device_wires", qnode.device.wires)
if "shots" in inspect.signature(qnode.func).parameters:
raise ValueError(
"Detected 'shots' as an argument of the quantum function to transform. "
"The 'shots' argument name is reserved for overriding the number of shots "
"taken by the device."
)
def _wrapper(*args, **kwargs):
if tkwargs.get("shots", False):
raise ValueError(
"Cannot provide a 'shots' value directly to the cut_circuit_mc "
"decorator when transforming a QNode. Please provide the number of shots in "
"the device or when calling the QNode."
)
shots = kwargs.pop("shots", False)
shots = shots or qnode.device.shots
if shots is None:
raise ValueError(
"A shots value must be provided in the device "
"or when calling the QNode to be cut"
)
qnode.construct(args, kwargs)
tapes, processing_fn = self.construct(qnode.qtape, *targs, **tkwargs, shots=shots)
interface = qnode.interface
execute_kwargs = getattr(qnode, "execute_kwargs", {}).copy()
max_diff = execute_kwargs.pop("max_diff", 2)
max_diff = transform_max_diff or max_diff
gradient_fn = getattr(qnode, "gradient_fn", qnode.diff_method)
gradient_kwargs = getattr(qnode, "gradient_kwargs", {})
if interface is None or not self.differentiable:
gradient_fn = None
execute_kwargs["cache"] = False
res = qml.execute(
tapes,
device=qnode.device,
gradient_fn=gradient_fn,
interface=interface,
max_diff=max_diff,
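            # One device execution per configuration tape: the Monte Carlo expansion already
            # enumerates ``shots`` configurations, so each tape only needs a single shot.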
override_shots=1,
gradient_kwargs=gradient_kwargs,
**execute_kwargs,
)
out = processing_fn(res)
if isinstance(out, list) and len(out) == 1:
return out[0]
return out
return _wrapper
def _get_symbol(i):
"""Finds the i-th ASCII symbol. Works for lowercase and uppercase letters, allowing i up to
51."""
if i >= len(string.ascii_letters):
raise ValueError(
"Set the use_opt_einsum argument to True when applying more than "
f"{len(string.ascii_letters)} wire cuts to a circuit"
)
return string.ascii_letters[i]
# pylint: disable=too-many-branches
def contract_tensors(
tensors: Sequence,
communication_graph: MultiDiGraph,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
use_opt_einsum: bool = False,
):
r"""Contract tensors according to the edges specified in the communication graph.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Consider the three tensors :math:`T^{(1)}`, :math:`T^{(2)}`, and :math:`T^{(3)}`, along with
their contraction equation
.. math::
\sum_{ijklmn} T^{(1)}_{ij,km} T^{(2)}_{kl,in} T^{(3)}_{mn,jl}
Each tensor is the result of the tomography of a circuit fragment and has some indices
corresponding to state preparations (marked by the indices before the comma) and some indices
corresponding to measurements (marked by the indices after the comma).
An equivalent representation of the contraction equation is to use a directed multigraph known
as the communication/quotient graph. In the communication graph, each tensor is assigned a node
and edges are added between nodes to mark a contraction along an index. The communication graph
resulting from the above contraction equation is a complete directed graph.
In the communication graph provided by :func:`fragment_graph`, edges are composed of
:class:`PrepareNode` and :class:`MeasureNode` pairs. To correctly map back to the contraction
equation, we must keep track of the order of preparation and measurement indices in each tensor.
This order is specified in the ``prepare_nodes`` and ``measure_nodes`` arguments.
Args:
tensors (Sequence): the tensors to be contracted
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between the tensors
prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of preparation indices in
each tensor
measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of measurement indices in
each tensor
use_opt_einsum (bool): Determines whether to use the
`opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful
for faster tensor contractions of large networks but must be installed separately using,
e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a
differentiable contraction.
Returns:
float or tensor_like: the result of contracting the tensor network
**Example**
We first set up the tensors and their corresponding :class:`~.PrepareNode` and
:class:`~.MeasureNode` orderings:
.. code-block:: python
from pennylane.transforms import qcut
import networkx as nx
import numpy as np
tensors = [np.arange(4), np.arange(4, 8)]
prep = [[], [qcut.PrepareNode(wires=0)]]
meas = [[qcut.MeasureNode(wires=0)], []]
The communication graph describing edges in the tensor network must also be constructed:
.. code-block:: python
graph = nx.MultiDiGraph([(0, 1, {"pair": (meas[0][0], prep[1][0])})])
The network can then be contracted using:
>>> qml.transforms.qcut.contract_tensors(tensors, graph, prep, meas)
38
"""
# pylint: disable=import-outside-toplevel
if use_opt_einsum:
try:
from opt_einsum import contract, get_symbol
except ImportError as e:
raise ImportError(
"The opt_einsum package is required when use_opt_einsum is set to "
"True in the contract_tensors function. This package can be "
"installed using:\npip install opt_einsum"
) from e
else:
contract = qml.math.einsum
get_symbol = _get_symbol
ctr = 0
tensor_indxs = [""] * len(communication_graph.nodes)
meas_map = {}
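    # Two passes over the communication graph: the first assigns a fresh einsum symbol to each
    # (MeasureNode, PrepareNode) pair and records it against the preparation index of the
    # preparing fragment; the second reuses the recorded symbol for the measurement index of the
    # measuring fragment, so the contraction runs along every cut.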
for i, (node, prep) in enumerate(zip(communication_graph.nodes, prepare_nodes)):
predecessors = communication_graph.pred[node]
for p in prep:
for _, pred_edges in predecessors.items():
for pred_edge in pred_edges.values():
meas_op, prep_op = pred_edge["pair"]
if p.id is prep_op.id:
symb = get_symbol(ctr)
ctr += 1
tensor_indxs[i] += symb
meas_map[meas_op] = symb
for i, (node, meas) in enumerate(zip(communication_graph.nodes, measure_nodes)):
successors = communication_graph.succ[node]
for m in meas:
for _, succ_edges in successors.items():
for succ_edge in succ_edges.values():
meas_op, _ = succ_edge["pair"]
if m.id is meas_op.id:
symb = meas_map[meas_op]
tensor_indxs[i] += symb
eqn = ",".join(tensor_indxs)
kwargs = {} if use_opt_einsum else {"like": tensors[0]}
return contract(eqn, *tensors, **kwargs)
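# Matrix used in ``_process_tensor`` (step 4) to convert preparation indices from the
# |0>, |1>, |+>, |+i> eigenstate basis to the I, X, Y, Z Pauli basis.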
CHANGE_OF_BASIS = qml.math.array(
[[1.0, 1.0, 0.0, 0.0], [-1.0, -1.0, 2.0, 0.0], [-1.0, -1.0, 0.0, 2.0], [1.0, -1.0, 0.0, 0.0]]
)
def _process_tensor(results, n_prep: int, n_meas: int):
"""Convert a flat slice of an individual circuit fragment's execution results into a tensor.
This function performs the following steps:
1. Reshapes ``results`` into the intermediate shape ``(4,) * n_prep + (4**n_meas,)``
2. Shuffles the final axis to follow the standard product over measurement settings. E.g., for
``n_meas = 2`` the standard product is: II, IX, IY, IZ, XI, ..., ZY, ZZ while the input order
will be the result of ``qml.grouping.partition_pauli_group(2)``, i.e., II, IZ, ZI, ZZ, ...,
YY.
3. Reshapes into the final target shape ``(4,) * (n_prep + n_meas)``
4. Performs a change of basis for the preparation indices (the first ``n_prep`` indices) from
the |0>, |1>, |+>, |+i> basis to the I, X, Y, Z basis using ``CHANGE_OF_BASIS``.
Args:
results (tensor_like): the input execution results
n_prep (int): the number of preparation nodes in the corresponding circuit fragment
n_meas (int): the number of measurement nodes in the corresponding circuit fragment
Returns:
tensor_like: the corresponding fragment tensor
"""
n = n_prep + n_meas
dim_meas = 4**n_meas
# Step 1
intermediate_shape = (4,) * n_prep + (dim_meas,)
intermediate_tensor = qml.math.reshape(results, intermediate_shape)
# Step 2
grouped = qml.grouping.partition_pauli_group(n_meas)
grouped_flat = [term for group in grouped for term in group]
order = qml.math.argsort(grouped_flat)
if qml.math.get_interface(intermediate_tensor) == "tensorflow":
# TensorFlow does not support slicing
intermediate_tensor = qml.math.gather(intermediate_tensor, order, axis=-1)
else:
sl = [slice(None)] * n_prep + [order]
intermediate_tensor = intermediate_tensor[tuple(sl)]
# Step 3
final_shape = (4,) * n
final_tensor = qml.math.reshape(intermediate_tensor, final_shape)
# Step 4
change_of_basis = qml.math.convert_like(CHANGE_OF_BASIS, intermediate_tensor)
for i in range(n_prep):
axes = [[1], [i]]
final_tensor = qml.math.tensordot(change_of_basis, final_tensor, axes=axes)
axes = list(reversed(range(n_prep))) + list(range(n_prep, n))
# Use transpose to reorder indices. We must do this because tensordot returns a tensor whose
# indices are ordered according to the uncontracted indices of the first tensor, followed
# by the uncontracted indices of the second tensor. For example, calculating C_kj T_ij returns
# a tensor T'_ki rather than T'_ik.
final_tensor = qml.math.transpose(final_tensor, axes=axes)
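    # Normalization: a factor of 2**(-1/2) per prepare/measure index, i.e. 2**(-(n_prep + n_meas)/2)
    # in total.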
final_tensor *= qml.math.power(2, -(n_meas + n_prep) / 2)
return final_tensor
def _to_tensors(
results,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
) -> List:
"""Process a flat list of execution results from all circuit fragments into the corresponding
tensors.
This function slices ``results`` according to the expected size of fragment tensors derived from
the ``prepare_nodes`` and ``measure_nodes`` and then passes onto ``_process_tensor`` for further
transformation.
Args:
results (tensor_like): A collection of execution results, provided as a flat tensor,
corresponding to the expansion of circuit fragments in the communication graph over
measurement and preparation node configurations. These results are processed into
tensors by this function.
prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence whose length is equal to the
number of circuit fragments, with each element used here to determine the number of
preparation nodes in a given fragment
measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence whose length is equal to the
number of circuit fragments, with each element used here to determine the number of
measurement nodes in a given fragment
Returns:
List[tensor_like]: the tensors for each circuit fragment in the communication graph
"""
ctr = 0
tensors = []
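    # Each fragment occupies a contiguous slice of ``4**(n_prep + n_meas)`` entries in the flat
    # ``results``, one expectation value per preparation setting and measured Pauli term.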
for p, m in zip(prepare_nodes, measure_nodes):
n_prep = len(p)
n_meas = len(m)
n = n_prep + n_meas
dim = 4**n
results_slice = results[ctr : dim + ctr]
tensors.append(_process_tensor(results_slice, n_prep, n_meas))
ctr += dim
if results.shape[0] != ctr:
raise ValueError(f"The results argument should be a flat list of length {ctr}")
return tensors
def qcut_processing_fn(
results: Sequence[Sequence],
communication_graph: MultiDiGraph,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
use_opt_einsum: bool = False,
):
"""Processing function for the :func:`cut_circuit() <pennylane.cut_circuit>` transform.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
results (Sequence[Sequence]): A collection of execution results generated from the
expansion of circuit fragments over measurement and preparation node configurations.
These results are processed into tensors and then contracted.
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between circuit fragments
prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of preparation indices in
each tensor
measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of measurement indices in
each tensor
use_opt_einsum (bool): Determines whether to use the
`opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful
for faster tensor contractions of large networks but must be installed separately using,
e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a
differentiable contraction.
Returns:
float or tensor_like: the output of the original uncut circuit arising from contracting
the tensor network of circuit fragments
"""
flat_results = qml.math.concatenate(results)
tensors = _to_tensors(flat_results, prepare_nodes, measure_nodes)
result = contract_tensors(
tensors, communication_graph, prepare_nodes, measure_nodes, use_opt_einsum
)
return result
@batch_transform
def cut_circuit(
tape: QuantumTape,
auto_cutter: Union[bool, Callable] = False,
use_opt_einsum: bool = False,
device_wires: Optional[Wires] = None,
max_depth: int = 1,
**kwargs,
) -> Tuple[Tuple[QuantumTape], Callable]:
"""
Cut up a quantum circuit into smaller circuit fragments.
Following the approach outlined in Theorem 2 of
`Peng et al. <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.125.150504>`__,
strategic placement of :class:`~.WireCut` operations can allow a quantum circuit to be split
into disconnected circuit fragments. Each circuit fragment is then executed multiple times by
varying the state preparations and measurements at incoming and outgoing cut locations,
respectively, resulting in a process tensor describing the action of the fragment. The process
tensors are then contracted to provide the result of the original uncut circuit.
.. note::
Only circuits that return a single expectation value are supported.
Args:
tape (QuantumTape): the tape of the full circuit to be cut
auto_cutter (Union[bool, Callable]): Toggle for enabling automatic cutting with the default
:func:`~.kahypar_cut` partition method. Can also pass a graph partitioning function that
takes an input graph and returns a list of edges to be cut based on a given set of
constraints and objective. The default :func:`~.kahypar_cut` function requires KaHyPar to
be installed using ``pip install kahypar`` for Linux and Mac users or visiting the
instructions `here <https://kahypar.org>`__ to compile from source for Windows users.
use_opt_einsum (bool): Determines whether to use the
`opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful
for faster tensor contractions of large networks but must be installed separately using,
e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a
differentiable contraction.
device_wires (Wires): Wires of the device that the cut circuits are to be run on.
When transforming a QNode, this argument is optional and will be set to the
QNode's device wires. Required when transforming a tape.
max_depth (int): The maximum depth used to expand the circuit while searching for wire cuts.
Only applicable when transforming a QNode.
kwargs: Additional keyword arguments to be passed to a callable ``auto_cutter`` argument.
For the default KaHyPar cutter, please refer to the docstring of functions
:func:`~.find_and_place_cuts` and :func:`~.kahypar_cut` for the available arguments.
Returns:
Callable: Function which accepts the same arguments as the QNode.
When called, this function will perform a process tomography of the
partitioned circuit fragments and combine the results via tensor
contractions.
**Example**
The following :math:`3`-qubit circuit contains a :class:`~.WireCut` operation. When decorated
with ``@qml.cut_circuit``, we can cut the circuit into two :math:`2`-qubit fragments:
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
@qml.cut_circuit
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.WireCut(wires=1)
qml.CZ(wires=[1, 2])
return qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
Executing ``circuit`` will run multiple configurations of the :math:`2`-qubit fragments which
are then postprocessed to give the result of the original circuit:
>>> x = np.array(0.531, requires_grad=True)
>>> circuit(x)
0.47165198882111165
    Furthermore, the output of the cut circuit is also differentiable:
>>> qml.grad(circuit)(x)
-0.276982865449393
Alternatively, if the optimal wire-cut placement is unknown for an arbitrary circuit, the
    ``auto_cutter`` option can be enabled to attempt to find such an optimal cut. The
    following example shows this capability on the same circuit as above but with the
:class:`~.WireCut` removed:
.. code-block:: python
@qml.cut_circuit(auto_cutter=True)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.CZ(wires=[1, 2])
return qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
>>> x = np.array(0.531, requires_grad=True)
>>> circuit(x)
0.47165198882111165
>>> qml.grad(circuit)(x)
-0.276982865449393
.. UsageDetails::
Manually placing :class:`~.WireCut` operations and decorating the QNode with the
``cut_circuit()`` batch transform is the suggested entrypoint into circuit cutting. However,
advanced users also have the option to work directly with a :class:`~.QuantumTape` and
manipulate the tape to perform circuit cutting using the below functionality:
.. autosummary::
:toctree:
~transforms.qcut.tape_to_graph
~transforms.qcut.find_and_place_cuts
~transforms.qcut.replace_wire_cut_nodes
~transforms.qcut.fragment_graph
~transforms.qcut.graph_to_tape
~transforms.qcut.remap_tape_wires
~transforms.qcut.expand_fragment_tape
~transforms.qcut.qcut_processing_fn
~transforms.qcut.CutStrategy
The following shows how these elementary steps are combined as part of the
``cut_circuit()`` transform.
Consider the circuit below:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.531, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.WireCut(wires=1)
qml.CZ(wires=[1, 2])
qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
>>> print(tape.draw())
0: ──RX(0.531)──╭C──RY(-0.4)──────╭┤ ⟨Z ⊗ Z ⊗ Z⟩
1: ──RY(0.9)────╰Z──//────────╭C──├┤ ⟨Z ⊗ Z ⊗ Z⟩
2: ──RX(0.3)──────────────────╰Z──╰┤ ⟨Z ⊗ Z ⊗ Z⟩
To cut the circuit, we first convert it to its graph representation:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
.. figure:: ../../_static/qcut_graph.svg
:align: center
:width: 60%
:target: javascript:void(0);
If, however, the optimal location of the :class:`~.WireCut` is unknown, we can use
        :func:`~.find_and_place_cuts` to attempt to automatically find such a cut
given the device constraints. Using the same circuit as above but with the
:class:`~.WireCut` removed, the same (optimal) cut can be recovered with automatic
cutting:
.. code-block:: python
with qml.tape.QuantumTape() as uncut_tape:
qml.RX(0.531, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.CZ(wires=[1, 2])
qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
        >>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
        ...     graph=qml.transforms.qcut.tape_to_graph(uncut_tape),
        ...     cut_strategy=qml.transforms.qcut.CutStrategy(max_free_wires=2),
        ... )
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX─╭C──RY────┤ ╭<Z@Z@Z>
1: ──RY─╰Z──//─╭C─┤ ├<Z@Z@Z>
2: ──RX────────╰Z─┤ ╰<Z@Z@Z>
Our next step is to remove the :class:`~.WireCut` nodes in the graph and replace with
:class:`~.MeasureNode` and :class:`~.PrepareNode` pairs.
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
The :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs are placeholder operations that
allow us to cut the circuit graph and then iterate over measurement and preparation
configurations at cut locations. First, the :func:`~.fragment_graph` function pulls apart
the graph into disconnected components as well as returning the
`communication_graph <https://en.wikipedia.org/wiki/Quotient_graph>`__
detailing the connectivity between the components.
>>> fragments, communication_graph = qml.transforms.qcut.fragment_graph(graph)
We now convert the ``fragments`` back to :class:`~.QuantumTape` objects
>>> fragment_tapes = [qml.transforms.qcut.graph_to_tape(f) for f in fragments]
The circuit fragments can now be visualized:
>>> print(fragment_tapes[0].draw())
0: ──RX(0.531)──╭C──RY(-0.4)─────┤ ⟨Z⟩
1: ──RY(0.9)────╰Z──MeasureNode──┤
>>> print(fragment_tapes[1].draw())
2: ──RX(0.3)──────╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──PrepareNode──╰C──╰┤ ⟨Z ⊗ Z⟩
Additionally, we must remap the tape wires to match those available on our device.
>>> dev = qml.device("default.qubit", wires=2)
>>> fragment_tapes = [
... qml.transforms.qcut.remap_tape_wires(t, dev.wires) for t in fragment_tapes
... ]
Next, each circuit fragment is expanded over :class:`~.MeasureNode` and
:class:`~.PrepareNode` configurations and a flat list of tapes is created:
.. code-block::
expanded = [qml.transforms.qcut.expand_fragment_tape(t) for t in fragment_tapes]
configurations = []
prepare_nodes = []
measure_nodes = []
for tapes, p, m in expanded:
configurations.append(tapes)
prepare_nodes.append(p)
measure_nodes.append(m)
tapes = tuple(tape for c in configurations for tape in c)
Each configuration is drawn below:
>>> for t in tapes:
... print(t.draw())
.. code-block::
0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ I⟩ ╭┤ ⟨Z ⊗ Z⟩
1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ I⟩ ╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ X⟩
1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ X⟩
0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ Y⟩
1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ Y⟩
0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──I────────╰C──╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──X────────╰C──╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──H────────╰C──╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.3)─────╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──H────────S──╰C──╰┤ ⟨Z ⊗ Z⟩
The last step is to execute the tapes and postprocess the results using
:func:`~.qcut_processing_fn`, which processes the results to the original full circuit
output via a tensor network contraction
>>> results = qml.execute(tapes, dev, gradient_fn=None)
>>> qml.transforms.qcut.qcut_processing_fn(
... results,
... communication_graph,
... prepare_nodes,
... measure_nodes,
... )
0.47165198882111165
"""
# pylint: disable=unused-argument
if len(tape.measurements) != 1:
raise ValueError(
"The circuit cutting workflow only supports circuits with a single output "
"measurement"
)
if not all(m.return_type is Expectation for m in tape.measurements):
raise ValueError(
"The circuit cutting workflow only supports circuits with expectation "
"value measurements"
)
if use_opt_einsum:
try:
import opt_einsum # pylint: disable=import-outside-toplevel,unused-import
except ImportError as e:
raise ImportError(
"The opt_einsum package is required when use_opt_einsum is set to "
"True in the cut_circuit function. This package can be "
"installed using:\npip install opt_einsum"
) from e
g = tape_to_graph(tape)
if auto_cutter is True or callable(auto_cutter):
cut_strategy = kwargs.pop("cut_strategy", None) or CutStrategy(
max_free_wires=len(device_wires)
)
g = find_and_place_cuts(
graph=g,
cut_method=auto_cutter if callable(auto_cutter) else kahypar_cut,
cut_strategy=cut_strategy,
**kwargs,
)
replace_wire_cut_nodes(g)
fragments, communication_graph = fragment_graph(g)
fragment_tapes = [graph_to_tape(f) for f in fragments]
fragment_tapes = [remap_tape_wires(t, device_wires) for t in fragment_tapes]
expanded = [expand_fragment_tape(t) for t in fragment_tapes]
configurations = []
prepare_nodes = []
measure_nodes = []
for tapes, p, m in expanded:
configurations.append(tapes)
prepare_nodes.append(p)
measure_nodes.append(m)
tapes = tuple(tape for c in configurations for tape in c)
return tapes, partial(
qcut_processing_fn,
communication_graph=communication_graph,
prepare_nodes=prepare_nodes,
measure_nodes=measure_nodes,
use_opt_einsum=use_opt_einsum,
)
@cut_circuit.custom_qnode_wrapper
def qnode_execution_wrapper(self, qnode, targs, tkwargs):
"""Here, we overwrite the QNode execution wrapper in order
to access the device wires."""
# pylint: disable=function-redefined
tkwargs.setdefault("device_wires", qnode.device.wires)
return self.default_qnode_wrapper(qnode, targs, tkwargs)
def _qcut_expand_fn(
tape: QuantumTape,
max_depth: int = 1,
auto_cutter: Union[bool, Callable] = False,
):
"""Expansion function for circuit cutting.
Expands operations until reaching a depth that includes :class:`~.WireCut` operations.
"""
for op in tape.operations:
if isinstance(op, WireCut):
return tape
if max_depth > 0:
return _qcut_expand_fn(tape.expand(), max_depth=max_depth - 1, auto_cutter=auto_cutter)
if not (auto_cutter is True or callable(auto_cutter)):
raise ValueError(
"No WireCut operations found in the circuit. Consider increasing the max_depth value if"
" operations or nested tapes contain WireCut operations."
)
return tape
def _cut_circuit_expand(
tape: QuantumTape,
use_opt_einsum: bool = False,
device_wires: Optional[Wires] = None,
max_depth: int = 1,
auto_cutter: Union[bool, Callable] = False,
**kwargs,
):
"""Main entry point for expanding operations until reaching a depth that
includes :class:`~.WireCut` operations."""
# pylint: disable=unused-argument
return _qcut_expand_fn(tape, max_depth, auto_cutter)
def _cut_circuit_mc_expand(
tape: QuantumTape,
classical_processing_fn: Optional[callable] = None,
max_depth: int = 1,
shots: Optional[int] = None,
device_wires: Optional[Wires] = None,
auto_cutter: Union[bool, Callable] = False,
**kwargs,
):
"""Main entry point for expanding operations in sample-based tapes until
reaching a depth that includes :class:`~.WireCut` operations."""
# pylint: disable=unused-argument, too-many-arguments
return _qcut_expand_fn(tape, max_depth, auto_cutter)
cut_circuit.expand_fn = _cut_circuit_expand
cut_circuit_mc.expand_fn = _cut_circuit_mc_expand
def remap_tape_wires(tape: QuantumTape, wires: Sequence) -> QuantumTape:
"""Map the wires of a tape to a new set of wires.
Given an :math:`n`-wire ``tape``, this function returns a new :class:`~.QuantumTape` with
operations and measurements acting on the first :math:`n` wires provided in the ``wires``
argument. The input ``tape`` is left unmodified.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): the quantum tape whose wires should be remapped
wires (Sequence): the new set of wires to map to
Returns:
QuantumTape: A remapped copy of the input tape
Raises:
ValueError: if the number of wires in ``tape`` exceeds ``len(wires)``
**Example**
Consider the following circuit that operates on wires ``[2, 3]``:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.5, wires=2)
qml.RY(0.6, wires=3)
qml.CNOT(wires=[2, 3])
qml.expval(qml.PauliZ(2) @ qml.PauliZ(3))
We can map from wires ``[2, 3]`` to ``[0, 1]`` using:
>>> new_wires = [0, 1]
>>> new_tape = qml.transforms.qcut.remap_tape_wires(tape, new_wires)
>>> print(new_tape.draw())
0: ──RX(0.5)──╭C──╭┤ ⟨Z ⊗ Z⟩
1: ──RY(0.6)──╰X──╰┤ ⟨Z ⊗ Z⟩
"""
if len(tape.wires) > len(wires):
raise ValueError(
f"Attempting to run a {len(tape.wires)}-wire circuit on a "
f"{len(wires)}-wire device. Consider increasing the number of wires in "
f"your device."
)
wire_map = dict(zip(tape.wires, wires))
copy_ops = [copy.copy(op) for op in tape.operations]
copy_meas = [copy.copy(op) for op in tape.measurements]
with QuantumTape() as new_tape:
for op in copy_ops:
new_wires = Wires([wire_map[w] for w in op.wires])
op._wires = new_wires
apply(op)
for meas in copy_meas:
obs = meas.obs
if isinstance(obs, Tensor):
for obs in obs.obs:
new_wires = Wires([wire_map[w] for w in obs.wires])
obs._wires = new_wires
else:
new_wires = Wires([wire_map[w] for w in obs.wires])
obs._wires = new_wires
apply(meas)
return new_tape
@dataclass()
class CutStrategy:
"""
A circuit-cutting distribution policy for executing (large) circuits on available (comparably
smaller) devices.
.. note::
This class is part of a work-in-progress feature to support automatic cut placement in the
circuit cutting workflow. Currently only manual placement of cuts is supported,
check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
devices (Union[qml.Device, Sequence[qml.Device]]): Single, or Sequence of, device(s).
Optional only when ``max_free_wires`` is provided.
max_free_wires (int): Number of wires for the largest available device. Optional only when
``devices`` is provided where it defaults to the maximum number of wires among
``devices``.
min_free_wires (int): Number of wires for the smallest available device, or, equivalently,
the smallest max fragment-wire-size that the partitioning is allowed to explore.
When provided, this parameter will be used to derive an upper-bound to the range of
explored number of fragments. Optional, defaults to 2 which corresponds to attempting
the most granular partitioning of max 2-wire fragments.
num_fragments_probed (Union[int, Sequence[int]]): Single, or 2-Sequence of, number(s)
specifying the potential (range of) number of fragments for the partitioner to attempt.
Optional, defaults to probing all valid strategies derivable from the circuit and
devices. When provided, has precedence over all other arguments affecting partitioning
exploration, such as ``max_free_wires``, ``min_free_wires``, or ``exhaustive``.
max_free_gates (int): Maximum allowed circuit depth for the deepest available device.
Optional, defaults to unlimited depth.
min_free_gates (int): Maximum allowed circuit depth for the shallowest available device.
Optional, defaults to ``max_free_gates``.
imbalance_tolerance (float): The global maximum allowed imbalance for all partition trials.
Optional, defaults to unlimited imbalance. Used only if there's a known hard balancing
constraint on the partitioning problem.
trials_per_probe (int): Number of repeated partitioning trials for a random automatic
cutting method to attempt per set of partitioning parameters. For a deterministic
cutting method, this can be set to 1. Defaults to 4.
**Example**
The following cut strategy specifies that a circuit should be cut into between
``2`` to ``5`` fragments, with each fragment having at most ``6`` wires and
at least ``4`` wires:
>>> cut_strategy = qml.transforms.CutStrategy(
... max_free_wires=6,
... min_free_wires=4,
... num_fragments_probed=(2, 5),
... )
"""
# pylint: disable=too-many-arguments, too-many-instance-attributes
#: Initialization argument only, used to derive ``max_free_wires`` and ``min_free_wires``.
devices: InitVar[Union[qml.Device, Sequence[qml.Device]]] = None
#: Number of wires for the largest available device.
max_free_wires: int = None
#: Number of wires for the smallest available device.
min_free_wires: int = None
#: The potential (range of) number of fragments for the partitioner to attempt.
num_fragments_probed: Union[int, Sequence[int]] = None
#: Maximum allowed circuit depth for the deepest available device.
max_free_gates: int = None
#: Maximum allowed circuit depth for the shallowest available device.
min_free_gates: int = None
#: The global maximum allowed imbalance for all partition trials.
imbalance_tolerance: float = None
#: Number of trials to repeat for per set of partition parameters probed.
trials_per_probe: int = 4
#: Class attribute, threshold for warning about too many fragments.
HIGH_NUM_FRAGMENTS: ClassVar[int] = 20
#: Class attribute, threshold for warning about too many partition attempts.
HIGH_PARTITION_ATTEMPTS: ClassVar[int] = 20
def __post_init__(
self,
devices,
):
"""Deriving cutting constraints from given devices and parameters."""
self.max_free_wires = self.max_free_wires
if isinstance(self.num_fragments_probed, int):
self.num_fragments_probed = [self.num_fragments_probed]
if isinstance(self.num_fragments_probed, (list, tuple)):
self.num_fragments_probed = sorted(self.num_fragments_probed)
self.k_lower = self.num_fragments_probed[0]
self.k_upper = self.num_fragments_probed[-1]
if self.k_lower <= 0:
raise ValueError("`num_fragments_probed` must be positive int(s)")
else:
self.k_lower, self.k_upper = None, None
if devices is None and self.max_free_wires is None:
raise ValueError("One of arguments `devices` and max_free_wires` must be provided.")
if isinstance(devices, qml.Device):
devices = (devices,)
if devices is not None:
if not isinstance(devices, SequenceType) or any(
(not isinstance(d, qml.Device) for d in devices)
):
raise ValueError(
"Argument `devices` must be a list or tuple containing elements of type "
"`qml.Device`"
)
device_wire_sizes = [len(d.wires) for d in devices]
self.max_free_wires = self.max_free_wires or max(device_wire_sizes)
self.min_free_wires = self.min_free_wires or min(device_wire_sizes)
if (self.imbalance_tolerance is not None) and not (
isinstance(self.imbalance_tolerance, (float, int)) and self.imbalance_tolerance >= 0
):
raise ValueError(
"The overall `imbalance_tolerance` is expected to be a non-negative number, "
f"got {type(self.imbalance_tolerance)} with value {self.imbalance_tolerance}."
)
self.min_free_wires = self.min_free_wires or 1
def get_cut_kwargs(
self,
tape_dag: MultiDiGraph,
max_wires_by_fragment: Sequence[int] = None,
max_gates_by_fragment: Sequence[int] = None,
exhaustive: bool = True,
) -> List[Dict[str, Any]]:
"""Derive the complete set of arguments, based on a given circuit, for passing to a graph
partitioner.
Args:
tape_dag (nx.MultiDiGraph): Graph representing a tape, typically the output of
:func:`tape_to_graph`.
max_wires_by_fragment (Sequence[int]): User-predetermined list of wire limits by
fragment. If supplied, the number of fragments will be derived from it and
exploration of other choices will not be made.
max_gates_by_fragment (Sequence[int]): User-predetermined list of gate limits by
fragment. If supplied, the number of fragments will be derived from it and
exploration of other choices will not be made.
exhaustive (bool): Toggle for an exhaustive search which will attempt all potentially
valid numbers of fragments into which the circuit is partitioned. If ``True``,
for a circuit with N gates, N - 1 attempts will be made with ``num_fragments``
ranging from [2, N], i.e. from bi-partitioning to complete partitioning where each
fragment has exactly a single gate. Defaults to ``True``.
Returns:
List[Dict[str, Any]]: A list of minimal kwargs being passed to a graph
partitioner method.
**Example**
Deriving kwargs for a given circuit and feeding them to a custom partitioner, along with
extra parameters specified using ``extra_kwargs``:
>>> cut_strategy = qcut.CutStrategy(devices=dev)
>>> cut_kwargs = cut_strategy.get_cut_kwargs(tape_dag)
>>> cut_trials = [
... my_partition_fn(tape_dag, **kwargs, **extra_kwargs) for kwargs in cut_kwargs
... ]
"""
tape_wires = set(w for _, _, w in tape_dag.edges.data("wire"))
num_tape_wires = len(tape_wires)
num_tape_gates = sum(not isinstance(n, WireCut) for n in tape_dag.nodes)
self._validate_input(max_wires_by_fragment, max_gates_by_fragment)
probed_cuts = self._infer_probed_cuts(
num_tape_wires=num_tape_wires,
num_tape_gates=num_tape_gates,
max_wires_by_fragment=max_wires_by_fragment,
max_gates_by_fragment=max_gates_by_fragment,
exhaustive=exhaustive,
)
return probed_cuts
@staticmethod
def _infer_imbalance(
k, num_wires, num_gates, free_wires, free_gates, imbalance_tolerance=None
) -> float:
"""Helper function for determining best imbalance limit."""
avg_fragment_wires = (num_wires - 1) // k + 1
avg_fragment_gates = (num_gates - 1) // k + 1
if free_wires < avg_fragment_wires:
raise ValueError(
"`free_wires` should be no less than the average number of wires per fragment. "
f"Got {free_wires} >= {avg_fragment_wires} ."
)
if free_gates < avg_fragment_gates:
raise ValueError(
"`free_gates` should be no less than the average number of gates per fragment. "
f"Got {free_gates} >= {avg_fragment_gates} ."
)
if free_gates > num_gates - k:
            # Case where gate depth is not limited (`-k` since each fragment has to have >= 1 gate):
free_gates = num_gates
        # A small adjustment is added to the imbalance factor to prevent small values of k from
        # resulting in extremely unbalanced fragments. It will heuristically force the smallest
        # fragment size to be >= 3 if the average fragment size is greater than 5. In other words,
        # tiny fragments are only allowed when the average fragment size is small in the first place.
balancing_adjustment = 2 if avg_fragment_gates > 5 else 0
free_gates = free_gates - (k - 1 + balancing_adjustment)
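        # The resulting imbalance is the allowed fractional excess of a fragment's gate count over
        # the average fragment size, bounded below for numerical stability and capped by any
        # user-supplied ``imbalance_tolerance``.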
gate_imbalance = free_gates / avg_fragment_gates - 1
imbalance = max(gate_imbalance, 0.1 / avg_fragment_gates) # numerical stability
if imbalance_tolerance is not None:
imbalance = min(imbalance, imbalance_tolerance)
return imbalance
@staticmethod
def _validate_input(
max_wires_by_fragment,
max_gates_by_fragment,
):
"""Helper parameter checker."""
if max_wires_by_fragment is not None:
if not isinstance(max_wires_by_fragment, (list, tuple)):
raise ValueError(
"`max_wires_by_fragment` is expected to be a list or tuple, but got "
f"{type(max_gates_by_fragment)}."
)
if any(not (isinstance(i, int) and i > 0) for i in max_wires_by_fragment):
raise ValueError(
"`max_wires_by_fragment` is expected to contain positive integers only."
)
if max_gates_by_fragment is not None:
if not isinstance(max_gates_by_fragment, (list, tuple)):
raise ValueError(
"`max_gates_by_fragment` is expected to be a list or tuple, but got "
f"{type(max_gates_by_fragment)}."
)
if any(not (isinstance(i, int) and i > 0) for i in max_gates_by_fragment):
raise ValueError(
"`max_gates_by_fragment` is expected to contain positive integers only."
)
if max_wires_by_fragment is not None and max_gates_by_fragment is not None:
if len(max_wires_by_fragment) != len(max_gates_by_fragment):
raise ValueError(
"The lengths of `max_wires_by_fragment` and `max_gates_by_fragment` should be "
f"equal, but got {len(max_wires_by_fragment)} and {len(max_gates_by_fragment)}."
)
def _infer_probed_cuts(
self,
num_tape_wires,
num_tape_gates,
max_wires_by_fragment=None,
max_gates_by_fragment=None,
exhaustive=True,
) -> List[Dict[str, Any]]:
"""
Helper function for deriving the minimal set of best default partitioning constraints
for the graph partitioner.
Args:
num_tape_wires (int): Number of wires in the circuit tape to be partitioned.
num_tape_gates (int): Number of gates in the circuit tape to be partitioned.
max_wires_by_fragment (Sequence[int]): User-predetermined list of wire limits by
fragment. If supplied, the number of fragments will be derived from it and
exploration of other choices will not be made.
max_gates_by_fragment (Sequence[int]): User-predetermined list of gate limits by
fragment. If supplied, the number of fragments will be derived from it and
exploration of other choices will not be made.
exhaustive (bool): Toggle for an exhaustive search which will attempt all potentially
valid numbers of fragments into which the circuit is partitioned. If ``True``,
``num_tape_gates - 1`` attempts will be made with ``num_fragments`` ranging from
[2, ``num_tape_gates``], i.e. from bi-partitioning to complete partitioning where
each fragment has exactly a single gate. Defaults to ``True``.
Returns:
            List[Dict[str, Any]]: A list of the minimal sets of kwargs to be passed to a
                graph partitioner method.
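        For example (illustrative numbers, not from the source): with
        ``num_tape_wires=4``, ``num_tape_gates=10``, and a single device exposing
        ``max_free_wires=3``, the wire-limited lower bound is
        ``k_lb = 1 + (4 - 1) // 3 = 2``; with ``exhaustive=True``, ``num_fragments``
        is then probed over the full range [2, 10].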
"""
# Assumes unlimited width/depth if not supplied.
max_free_wires = self.max_free_wires or num_tape_wires
max_free_gates = self.max_free_gates or num_tape_gates
# Assumes same number of wires/gates across all devices if min_free_* not provided.
min_free_wires = self.min_free_wires or max_free_wires
min_free_gates = self.min_free_gates or max_free_gates
# The lower bound of k corresponds to executing each fragment on the largest available
# device.
k_lb = 1 + max(
(num_tape_wires - 1) // max_free_wires, # wire limited
(num_tape_gates - 1) // max_free_gates, # gate limited
)
# The upper bound of k corresponds to executing each fragment on the smallest available
# device.
k_ub = 1 + max(
(num_tape_wires - 1) // min_free_wires, # wire limited
(num_tape_gates - 1) // min_free_gates, # gate limited
)
if exhaustive:
k_lb = max(2, k_lb)
k_ub = num_tape_gates
# The global imbalance tolerance, if not given, defaults to a very loose upper bound:
imbalance_tolerance = k_ub if self.imbalance_tolerance is None else self.imbalance_tolerance
probed_cuts = []
if max_gates_by_fragment is None and max_wires_by_fragment is None:
            # k_lower, when supplied by a user, can be higher than k_lb if the desired k is known:
k_lower = self.k_lower if self.k_lower is not None else k_lb
# k_upper, when supplied by a user, can be higher than k_ub to encourage exploration:
k_upper = self.k_upper if self.k_upper is not None else k_ub
if k_lower < k_lb:
warnings.warn(
f"The provided `k_lower={k_lower}` is less than the lowest allowed value, "
f"will override and set `k_lower={k_lb}`."
)
k_lower = k_lb
if k_lower > self.HIGH_NUM_FRAGMENTS:
warnings.warn(
f"The attempted number of fragments seems high with lower bound at {k_lower}."
)
# Prepare the list of ks to explore:
ks = list(range(k_lower, k_upper + 1))
if len(ks) > self.HIGH_PARTITION_ATTEMPTS:
warnings.warn(f"The numer of partition attempts seems high ({len(ks)}).")
else:
# When the by-fragment wire and/or gate limits are supplied, derive k and imbalance and
# return a single partition config.
ks = [len(max_wires_by_fragment or max_gates_by_fragment)]
for k in ks:
imbalance = self._infer_imbalance(
k,
num_tape_wires,
num_tape_gates,
max_free_wires if max_wires_by_fragment is None else max(max_wires_by_fragment),
max_free_gates if max_gates_by_fragment is None else max(max_gates_by_fragment),
imbalance_tolerance,
)
cut_kwargs = {
"num_fragments": k,
"imbalance": imbalance,
}
if max_wires_by_fragment is not None:
cut_kwargs["max_wires_by_fragment"] = max_wires_by_fragment
if max_gates_by_fragment is not None:
cut_kwargs["max_gates_by_fragment"] = max_gates_by_fragment
probed_cuts.append(cut_kwargs)
return probed_cuts
def _graph_to_hmetis(
graph: MultiDiGraph,
hyperwire_weight: int = 0,
edge_weights: Sequence[int] = None,
) -> Tuple[List[int], List[int], List[Union[int, float]]]:
"""Converts a ``MultiDiGraph`` into the
`hMETIS hypergraph input format <http://glaros.dtc.umn.edu/gkhome/fetch/sw/hmetis/manual.pdf>`__
conforming to KaHyPar's calling signature.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
hyperwire_weight (int): Weight on the artificially appended hyperedges representing wires.
Defaults to 0 which leads to no such insertion. If greater than 0, hyperedges will be
appended with the provided weight, to encourage the resulting fragments to cluster gates
on the same wire together.
edge_weights (Sequence[int]): Weights for regular edges in the graph. Defaults to ``None``,
which leads to unit-weighted edges.
Returns:
Tuple[List,List,List]: The 3 lists representing an (optionally weighted) hypergraph:
- Flattened list of adjacent node indices.
- List of starting indices for edges in the above adjacent-nodes-list.
- Optional list of edge weights. ``None`` if ``hyperwire_weight`` is equal to 0.
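    **Example**
    An illustrative sketch (the short tape below is hypothetical, not from the source):
    >>> with qml.tape.QuantumTape() as tape:
    ...     qml.RX(0.432, wires=0)
    ...     qml.CNOT(wires=[0, 1])
    >>> graph = qml.transforms.qcut.tape_to_graph(tape)
    >>> adj_nodes, edge_splits, edge_weights = _graph_to_hmetis(graph, hyperwire_weight=1)
    With ``hyperwire_weight=1``, one hyperedge per wire is appended, so ``edge_splits``
    gains one extra entry per wire and ``edge_weights`` ends with the per-wire weights.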
"""
nodes = list(graph.nodes)
edges = graph.edges(data="wire")
wires = {w for _, _, w in edges}
adj_nodes = [nodes.index(v) for ops in graph.edges(keys=False) for v in ops]
edge_splits = qml.math.cumsum([0] + [len(e) for e in graph.edges(keys=False)]).tolist()
edge_weights = (
edge_weights if edge_weights is not None and len(edges) == len(edge_weights) else None
)
if hyperwire_weight:
hyperwires = {w: set() for w in wires}
num_wires = len(hyperwires)
for v0, v1, wire in edges:
hyperwires[wire].update([nodes.index(v0), nodes.index(v1)])
for wire, nodes_on_wire in hyperwires.items():
nwv = len(nodes_on_wire)
edge_splits.append(nwv + edge_splits[-1])
adj_nodes = adj_nodes + list(nodes_on_wire)
assert len(edge_splits) == len(edges) + num_wires + 1
if isinstance(hyperwire_weight, (int, float)):
# assumes original edges having unit weights by default:
edge_weights = edge_weights or ([1] * len(edges))
wire_weights = [hyperwire_weight] * num_wires
edge_weights = edge_weights + wire_weights
return adj_nodes, edge_splits, edge_weights
def kahypar_cut(
graph: MultiDiGraph,
num_fragments: int,
imbalance: int = None,
edge_weights: List[Union[int, float]] = None,
node_weights: List[Union[int, float]] = None,
fragment_weights: List[Union[int, float]] = None,
hyperwire_weight: int = 1,
seed: int = None,
config_path: Union[str, Path] = None,
trial: int = None,
verbose: bool = False,
) -> List[Tuple[Operation, Operation, Any]]:
"""Calls `KaHyPar <https://kahypar.org/>`__ to partition a graph.
.. warning::
Requires KaHyPar to be installed separately. For Linux and Mac users,
KaHyPar can be installed using ``pip install kahypar``. Windows users
can follow the instructions
`here <https://kahypar.org>`__ to compile from source.
Args:
graph (nx.MultiDiGraph): The graph to be partitioned.
num_fragments (int): Desired number of fragments.
imbalance (int): Imbalance factor of the partitioning. Defaults to KaHyPar's determination.
edge_weights (List[Union[int, float]]): Weights for edges. Defaults to unit-weighted edges.
node_weights (List[Union[int, float]]): Weights for nodes. Defaults to unit-weighted nodes.
fragment_weights (List[Union[int, float]]): Maximum size constraints by fragment. Defaults
to no such constraints, with ``imbalance`` the only parameter affecting fragment sizes.
hyperwire_weight (int): Weight on the artificially appended hyperedges representing wires.
Setting it to 0 leads to no such insertion. If greater than 0, hyperedges will be
appended with the provided weight, to encourage the resulting fragments to cluster gates
on the same wire together. Defaults to 1.
seed (int): KaHyPar's seed. Defaults to the seed in the config file which defaults to -1,
i.e. unfixed seed.
config_path (str): KaHyPar's ``.ini`` config file path. Defaults to its SEA20 paper config.
trial (int): trial id for summary label creation. Defaults to ``None``.
verbose (bool): Flag for printing KaHyPar's output summary. Defaults to ``False``.
Returns:
List[Union[int, Any]]: List of cut edges.
**Example**
Consider the following 2-wire circuit with one CNOT gate connecting the wires:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.CNOT(wires=[0, "a"])
qml.RZ(0.240, wires=0)
qml.RZ(0.133, wires="a")
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.expval(qml.PauliZ(wires=[0]))
We can let KaHyPar automatically find the optimal edges to place cuts:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> cut_edges = qml.transforms.qcut.kahypar_cut(
graph=graph,
num_fragments=2,
)
>>> cut_edges
[(CNOT(wires=[0, 'a']), RZ(0.24, wires=[0]), 0)]
"""
# pylint: disable=too-many-arguments, import-outside-toplevel
try:
import kahypar
except ImportError as e:
raise ImportError(
"KaHyPar must be installed to use this method for automatic "
"cut placement. Try pip install kahypar or visit "
"https://kahypar.org/ for installation instructions."
) from e
adjacent_nodes, edge_splits, edge_weights = _graph_to_hmetis(
graph=graph, hyperwire_weight=hyperwire_weight, edge_weights=edge_weights
)
trial = 0 if trial is None else trial
ne = len(edge_splits) - 1
nv = max(adjacent_nodes) + 1
if edge_weights is not None or node_weights is not None:
edge_weights = edge_weights or [1] * ne
node_weights = node_weights or [1] * nv
hypergraph = kahypar.Hypergraph(
nv,
ne,
edge_splits,
adjacent_nodes,
num_fragments,
edge_weights,
node_weights,
)
else:
hypergraph = kahypar.Hypergraph(nv, ne, edge_splits, adjacent_nodes, num_fragments)
context = kahypar.Context()
config_path = config_path or str(Path(__file__).parent / "_cut_kKaHyPar_sea20.ini")
context.loadINIconfiguration(config_path)
context.setK(num_fragments)
if isinstance(imbalance, float):
context.setEpsilon(imbalance)
if isinstance(fragment_weights, SequenceType) and (len(fragment_weights) == num_fragments):
context.setCustomTargetBlockWeights(fragment_weights)
if not verbose:
context.suppressOutput(True)
# KaHyPar fixes seed to 42 by default, need to manually sample seed to randomize:
kahypar_seed = np.random.default_rng(seed).choice(2**15)
context.setSeed(kahypar_seed)
kahypar.partition(hypergraph, context)
cut_edge_mask = [hypergraph.connectivity(e) > 1 for e in hypergraph.edges()]
    # compress() ignores the extra hyperwires at the end if there are any.
cut_edges = list(compress(graph.edges, cut_edge_mask))
if verbose:
fragment_sizes = [hypergraph.blockSize(p) for p in range(num_fragments)]
print(len(fragment_sizes), fragment_sizes)
return cut_edges
def place_wire_cuts(
graph: MultiDiGraph, cut_edges: Sequence[Tuple[Operation, Operation, Any]]
) -> MultiDiGraph:
"""Inserts a :class:`~.WireCut` node for each provided cut edge into a circuit graph.
Args:
graph (nx.MultiDiGraph): The original (tape-converted) graph to be cut.
cut_edges (Sequence[Tuple[Operation, Operation, Any]]): List of ``MultiDiGraph`` edges
to be replaced with a :class:`~.WireCut` node. Each 3-tuple represents the source node, the
target node, and the wire key of the (multi)edge.
Returns:
MultiDiGraph: Copy of the input graph with :class:`~.WireCut` nodes inserted.
**Example**
Consider the following 2-wire circuit with one CNOT gate connecting the wires:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.CNOT(wires=[0, "a"])
qml.expval(qml.PauliZ(wires=[0]))
>>> print(tape.draw())
0: ──RX(0.432)──╭C──┤ ⟨Z⟩
a: ──RY(0.543)──╰X──┤
If we know we want to place a :class:`~.WireCut` node between nodes ``RY(0.543, wires=["a"])`` and
``CNOT(wires=[0, 'a'])`` after the tape is constructed, we can first find the edge in the graph:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> op0, op1 = tape.operations[1], tape.operations[2]
>>> cut_edges = [e for e in graph.edges if e[0] is op0 and e[1] is op1]
>>> cut_edges
[(RY(0.543, wires=['a']), CNOT(wires=[0, 'a']), 0)]
Then feed it to this function for placement:
>>> cut_graph = qml.transforms.qcut.place_wire_cuts(graph=graph, cut_edges=cut_edges)
>>> cut_graph
<networkx.classes.multidigraph.MultiDiGraph at 0x7f7251ac1220>
And visualize the cut by converting back to a tape:
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX(0.432)──────╭C──┤ ⟨Z⟩
a: ──RY(0.543)──//──╰X──┤
"""
cut_graph = graph.copy()
for op0, op1, wire_key in cut_edges:
# Get info:
order = cut_graph.nodes[op0]["order"] + 1
wire = cut_graph.edges[(op0, op1, wire_key)]["wire"]
# Apply cut:
cut_graph.remove_edge(op0, op1, wire_key)
# Increment order for all subsequent gates:
for op, o in cut_graph.nodes(data="order"):
if o >= order:
cut_graph.nodes[op]["order"] += 1
# Add WireCut
wire_cut = WireCut(wires=wire)
cut_graph.add_node(wire_cut, order=order)
cut_graph.add_edge(op0, wire_cut, wire=wire)
cut_graph.add_edge(wire_cut, op1, wire=wire)
return cut_graph
def _remove_existing_cuts(graph: MultiDiGraph) -> MultiDiGraph:
"""Removes all existing, manually or automatically placed, cuts from a circuit graph, be it
``WireCut``s or ``MeasureNode``-``PrepareNode`` pairs.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
Returns:
(MultiDiGraph): Copy of the input graph with all its existing cuts removed.
"""
uncut_graph = graph.copy()
for op in list(graph.nodes):
if isinstance(op, WireCut):
uncut_graph.remove_node(op)
elif isinstance(op, MeasureNode):
for op1 in graph.neighbors(op):
if isinstance(op1, PrepareNode):
uncut_graph.remove_node(op)
uncut_graph.remove_node(op1)
if len([n for n in uncut_graph.nodes if isinstance(n, (MeasureNode, PrepareNode))]) > 0:
warnings.warn(
"The circuit contains `MeasureNode` or `PrepareNode` operations that are "
"not paired up correctly. Please check.",
UserWarning,
)
return uncut_graph
def find_and_place_cuts(
graph: MultiDiGraph,
cut_method: Callable = kahypar_cut,
cut_strategy: CutStrategy = None,
replace_wire_cuts=False,
local_measurement=False,
**kwargs,
) -> MultiDiGraph:
"""Automatically finds and places optimal :class:`~.WireCut` nodes into a given tape-converted graph
using a customizable graph partitioning function. Preserves existing placed cuts.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
cut_method (Callable): A graph partitioning function that takes an input graph and returns
a list of edges to be cut based on a given set of constraints and objective. Defaults
to :func:`kahypar_cut` which requires KaHyPar to be installed using
``pip install kahypar`` for Linux and Mac users or visiting the
instructions `here <https://kahypar.org>`__ to compile from
source for Windows users.
cut_strategy (CutStrategy): Strategy for optimizing cutting parameters based on device
constraints. Defaults to ``None`` in which case ``kwargs`` must be fully specified
for passing to the ``cut_method``.
replace_wire_cuts (bool): Whether to replace :class:`~.WireCut` nodes with
:class:`~.MeasureNode` and :class:`~.PrepareNode` pairs. Defaults to ``False``.
local_measurement (bool): Whether to use the local-measurement circuit-cutting objective,
i.e. the maximum node-degree of the communication graph, for cut evaluation. Defaults
to ``False`` which assumes global measurement and uses the total number of cuts as the
cutting objective.
kwargs: Additional keyword arguments to be passed to the callable ``cut_method``.
Returns:
nx.MultiDiGraph: Copy of the input graph with :class:`~.WireCut` nodes inserted.
**Example**
Consider the following 4-wire circuit with a single CNOT gate connecting the top (wires
``[0, 1]``) and bottom (wires ``["a", "b"]``) halves of the circuit. Note there's a
:class:`~.WireCut` manually placed into the circuit already.
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.1, wires=0)
qml.RY(0.2, wires=1)
qml.RX(0.3, wires="a")
qml.RY(0.4, wires="b")
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=["a", "b"])
qml.CNOT(wires=[1, "a"])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=["a", "b"])
qml.RX(0.5, wires="a")
qml.RY(0.6, wires="b")
qml.expval(qml.PauliX(wires=[0]) @ qml.PauliY(wires=["a"]) @ qml.PauliZ(wires=["b"]))
>>> print(tape.draw())
0: ──RX(0.1)──╭C──────────╭C───────────╭┤ ⟨X ⊗ Y ⊗ Z⟩
1: ──RY(0.2)──╰X──//──╭C──╰X───────────│┤
a: ──RX(0.3)──╭C──────╰X──╭C──RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩
b: ──RY(0.4)──╰X──────────╰X──RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩
Since the existing :class:`~.WireCut` doesn't sufficiently fragment the circuit, we can find the
remaining cuts using the default KaHyPar partitioner:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
num_fragments=2,
imbalance=0.5,
)
Visualizing the newly-placed cut:
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX(0.1)──╭C───────────────╭C────────╭┤ ⟨X ⊗ Y ⊗ Z⟩
1: ──RY(0.2)──╰X──//──╭C───//──╰X────────│┤
a: ──RX(0.3)──╭C──────╰X──╭C────RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩
b: ──RY(0.4)──╰X──────────╰X────RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩
We can then proceed with the usual process of replacing :class:`~.WireCut` nodes with
pairs of :class:`~.MeasureNode` and :class:`~.PrepareNode`, and then break the graph
into fragments. Or, alternatively, we can directly get such processed graph by passing
``replace_wire_cuts=True``:
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
num_fragments=2,
imbalance=0.5,
replace_wire_cuts=True,
)
>>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph)
>>> for t in frags:
... print(qml.transforms.qcut.graph_to_tape(t).draw())
.. code-block::
0: ──RX(0.1)──────╭C───────────────╭C──┤ ⟨X⟩
1: ──RY(0.2)──────╰X──MeasureNode──│───┤
2: ──PrepareNode───────────────────╰X──┤
a: ──RX(0.3)──────╭C──╭X──╭C────────────RX(0.5)──╭┤ ⟨Y ⊗ Z⟩
b: ──RY(0.4)──────╰X──│───╰X────────────RY(0.6)──╰┤ ⟨Y ⊗ Z⟩
1: ──PrepareNode──────╰C───MeasureNode────────────┤
Alternatively, if all we want to do is to find the optimal way to fit a circuit onto a smaller
device, a :class:`~.CutStrategy` can be used to populate the necessary explorations of cutting
parameters. As an extreme example, if the only device at our disposal is a 2-qubit device, a
    simple cut strategy is to simply specify the ``max_free_wires`` argument (or equivalently
directly passing a :class:`~.Device` to the ``device`` argument):
>>> cut_strategy = qml.transforms.qcut.CutStrategy(max_free_wires=2)
>>> print(cut_strategy.get_cut_kwargs(graph))
[{'num_fragments': 2, 'imbalance': 0.5714285714285714},
{'num_fragments': 3, 'imbalance': 1.4},
{'num_fragments': 4, 'imbalance': 1.75},
{'num_fragments': 5, 'imbalance': 2.3333333333333335},
{'num_fragments': 6, 'imbalance': 2.0},
{'num_fragments': 7, 'imbalance': 3.0},
{'num_fragments': 8, 'imbalance': 2.5},
{'num_fragments': 9, 'imbalance': 2.0},
{'num_fragments': 10, 'imbalance': 1.5},
{'num_fragments': 11, 'imbalance': 1.0},
{'num_fragments': 12, 'imbalance': 0.5},
{'num_fragments': 13, 'imbalance': 0.05},
{'num_fragments': 14, 'imbalance': 0.1}]
The printed list above shows all the possible cutting configurations one can attempt to perform
in order to search for the optimal cut. This is done by directly passing a
:class:`~.CutStrategy` to :func:`~.find_and_place_cuts`:
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
cut_strategy=cut_strategy,
)
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX──//─╭C──//────────╭C──//─────────┤ ╭<X@Y@Z>
1: ──RY──//─╰X──//─╭C──//─╰X─────────────┤ │
a: ──RX──//─╭C──//─╰X──//─╭C──//──RX──//─┤ ├<X@Y@Z>
b: ──RY──//─╰X──//────────╰X──//──RY─────┤ ╰<X@Y@Z>
    As one can tell, quite a few cuts have to be made in order to execute the circuit solely on
2-qubit devices. To verify, let's print the fragments:
>>> qml.transforms.qcut.replace_wire_cut_nodes(cut_graph)
>>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph)
>>> for t in frags:
... print(qml.transforms.qcut.graph_to_tape(t).draw())
.. code-block::
0: ──RX──MeasureNode─┤
1: ──RY──MeasureNode─┤
a: ──RX──MeasureNode─┤
b: ──RY──MeasureNode─┤
0: ──PrepareNode─╭C──MeasureNode─┤
1: ──PrepareNode─╰X──MeasureNode─┤
a: ──PrepareNode─╭C──MeasureNode─┤
b: ──PrepareNode─╰X──MeasureNode─┤
1: ──PrepareNode─╭C──MeasureNode─┤
a: ──PrepareNode─╰X──MeasureNode─┤
0: ──PrepareNode─╭C──MeasureNode─┤
1: ──PrepareNode─╰X──────────────┤
b: ──PrepareNode─╭X──MeasureNode─┤
a: ──PrepareNode─╰C──MeasureNode─┤
a: ──PrepareNode──RX──MeasureNode─┤
b: ──PrepareNode──RY─┤ <Z>
0: ──PrepareNode─┤ <X>
a: ──PrepareNode─┤ <Y>
"""
cut_graph = _remove_existing_cuts(graph)
if isinstance(cut_strategy, CutStrategy):
cut_kwargs_probed = cut_strategy.get_cut_kwargs(cut_graph)
# Need to reseed if a seed is passed:
seed = kwargs.pop("seed", None)
seeds = np.random.default_rng(seed).choice(2**15, cut_strategy.trials_per_probe).tolist()
cut_edges_probed = {
(cut_kwargs["num_fragments"], trial_id): cut_method(
cut_graph,
**{
**cut_kwargs,
**kwargs,
"seed": seed,
}, # kwargs has higher precedence for colliding keys
)
for cut_kwargs in cut_kwargs_probed
for trial_id, seed in zip(range(cut_strategy.trials_per_probe), seeds)
}
valid_cut_edges = {}
for (num_partitions, _), cut_edges in cut_edges_probed.items():
# The easiest way to tell if a cut is valid is to just do the fragment graph.
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges)
num_cuts = sum(isinstance(n, WireCut) for n in cut_graph.nodes)
replace_wire_cut_nodes(cut_graph)
frags, comm = fragment_graph(cut_graph)
max_frag_degree = max(dict(comm.degree()).values())
if _is_valid_cut(
fragments=frags,
num_cuts=num_cuts,
max_frag_degree=max_frag_degree,
num_fragments_requested=num_partitions,
cut_candidates=valid_cut_edges,
max_free_wires=cut_strategy.max_free_wires,
):
key = (len(frags), max_frag_degree)
valid_cut_edges[key] = cut_edges
if len(valid_cut_edges) < 1:
raise ValueError(
"Unable to find a circuit cutting that satisfies all constraints. "
"Are the constraints too strict?"
)
cut_edges = _get_optim_cut(valid_cut_edges, local_measurement=local_measurement)
else:
cut_edges = cut_method(cut_graph, **kwargs)
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges)
if replace_wire_cuts:
replace_wire_cut_nodes(cut_graph)
return cut_graph
def _is_valid_cut(
fragments,
num_cuts,
max_frag_degree,
num_fragments_requested,
cut_candidates,
max_free_wires,
):
"""Helper function for determining if a cut is a valid canditate."""
# pylint: disable=too-many-arguments
k = len(fragments)
key = (k, max_frag_degree)
correct_num_fragments = k <= num_fragments_requested
best_candidate_yet = (key not in cut_candidates) or (len(cut_candidates[key]) > num_cuts)
# pylint: disable=no-member
all_fragments_fit = all(
        len(graph_to_tape(f).wires) <= max_free_wires for f in fragments
)
return correct_num_fragments and best_candidate_yet and all_fragments_fit
def _get_optim_cut(valid_cut_edges, local_measurement=False):
"""Picks out the best cut from a dict of valid candidate cuts."""
if local_measurement:
min_max_node_degree = min(max_node_degree for _, max_node_degree in valid_cut_edges)
optim_cuts = {
k: cut_edges
for (k, max_node_degree), cut_edges in valid_cut_edges.items()
if (max_node_degree == min_max_node_degree)
}
else:
min_cuts = min(len(cut_edges) for cut_edges in valid_cut_edges.values())
optim_cuts = {
k: cut_edges
for (k, _), cut_edges in valid_cut_edges.items()
if (len(cut_edges) == min_cuts)
}
return optim_cuts[min(optim_cuts)] # choose the lowest num_fragments among best ones.
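# Illustrative note (not from the source): given hypothetical candidates
# {(2, 3): [e1, e2], (3, 3): [e3, e4, e5]} keyed by (num_fragments, max_frag_degree),
# the default (global-measurement) branch above keeps the entry with the fewest cut
# edges ([e1, e2]); with local_measurement=True the smallest max node degree is used
# instead, and ties are broken by the lowest num_fragments.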
|
[
"pennylane.math.argsort",
"pennylane.math.transpose",
"kahypar.Context",
"pennylane.execute",
"pennylane.S",
"pennylane.math.power",
"dataclasses.dataclass",
"inspect.signature",
"pennylane.wires.Wires",
"kahypar.partition",
"networkx.weakly_connected_components",
"pennylane.numpy.array",
"pennylane.numpy.mean",
"copy.copy",
"networkx.has_path",
"pennylane.apply",
"pennylane.ops.qubit.non_parametric_ops.WireCut",
"pennylane.math.gather",
"kahypar.Hypergraph",
"pathlib.Path",
"pennylane.math.array",
"pennylane.Projector",
"pennylane.math.get_interface",
"pennylane.PauliX",
"pennylane.PauliZ",
"pennylane.math.flatten",
"pennylane.math.tensordot",
"pennylane.math.convert_like",
"pennylane.math.reshape",
"pennylane.numpy.prod",
"warnings.warn",
"pennylane.math.concatenate",
"pennylane.tape.stop_recording",
"opt_einsum.contract",
"pennylane.wires.Wires.all_wires",
"networkx.MultiDiGraph",
"pennylane.tape.QuantumTape",
"uuid.uuid4",
"opt_einsum.get_symbol",
"pennylane.expval",
"pennylane.numpy.random.default_rng",
"pennylane.PauliY",
"pennylane.operation.Tensor",
"pennylane.Identity",
"pennylane.grouping.partition_pauli_group",
"functools.partial",
"pennylane.measurements.MeasurementProcess",
"pennylane.Hadamard",
"itertools.compress",
"pennylane.grouping.string_to_pauli_word",
"pennylane.numpy.hstack"
] |
[((57070, 57184), 'pennylane.math.array', 'qml.math.array', (['[[1.0, 1.0, 0.0, 0.0], [-1.0, -1.0, 2.0, 0.0], [-1.0, -1.0, 0.0, 2.0], [1.0,\n -1.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 0.0, 0.0], [-1.0, -1.0, 2.0, 0.0], [-1.0, -1.0, \n 0.0, 2.0], [1.0, -1.0, 0.0, 0.0]])\n', (57084, 57184), True, 'import pennylane as qml\n'), ((82304, 82315), 'dataclasses.dataclass', 'dataclass', ([], {}), '()\n', (82313, 82315), False, 'from dataclasses import InitVar, dataclass\n'), ((7328, 7342), 'networkx.MultiDiGraph', 'MultiDiGraph', ([], {}), '()\n', (7340, 7342), False, 'from networkx import MultiDiGraph, has_path, weakly_connected_components\n'), ((11254, 11293), 'networkx.weakly_connected_components', 'weakly_connected_components', (['graph_copy'], {}), '(graph_copy)\n', (11281, 11293), False, 'from networkx import MultiDiGraph, has_path, weakly_connected_components\n'), ((11405, 11419), 'networkx.MultiDiGraph', 'MultiDiGraph', ([], {}), '()\n', (11417, 11419), False, 'from networkx import MultiDiGraph, has_path, weakly_connected_components\n'), ((15219, 15266), 'pennylane.wires.Wires.all_wires', 'Wires.all_wires', (['[n.wires for n in graph.nodes]'], {}), '([n.wires for n in graph.nodes])\n', (15234, 15266), False, 'from pennylane.wires import Wires\n'), ((18975, 18993), 'pennylane.Identity', 'qml.Identity', (['wire'], {}), '(wire)\n', (18987, 18993), True, 'import pennylane as qml\n'), ((19027, 19043), 'pennylane.PauliX', 'qml.PauliX', (['wire'], {}), '(wire)\n', (19037, 19043), True, 'import pennylane as qml\n'), ((19078, 19096), 'pennylane.Hadamard', 'qml.Hadamard', (['wire'], {}), '(wire)\n', (19090, 19096), True, 'import pennylane as qml\n'), ((19132, 19148), 'pennylane.PauliX', 'qml.PauliX', (['wire'], {}), '(wire)\n', (19142, 19148), True, 'import pennylane as qml\n'), ((19153, 19171), 'pennylane.Hadamard', 'qml.Hadamard', (['wire'], {}), '(wire)\n', (19165, 19171), True, 'import pennylane as qml\n'), ((19207, 19225), 'pennylane.Hadamard', 'qml.Hadamard', (['wire'], {}), '(wire)\n', (19219, 19225), True, 'import pennylane as qml\n'), ((19230, 19247), 'pennylane.S', 'qml.S', ([], {'wires': 'wire'}), '(wires=wire)\n', (19235, 19247), True, 'import pennylane as qml\n'), ((19284, 19300), 'pennylane.PauliX', 'qml.PauliX', (['wire'], {}), '(wire)\n', (19294, 19300), True, 'import pennylane as qml\n'), ((19305, 19323), 'pennylane.Hadamard', 'qml.Hadamard', (['wire'], {}), '(wire)\n', (19317, 19323), True, 'import pennylane as qml\n'), ((19328, 19345), 'pennylane.S', 'qml.S', ([], {'wires': 'wire'}), '(wires=wire)\n', (19333, 19345), True, 'import pennylane as qml\n'), ((57016, 57049), 'opt_einsum.contract', 'contract', (['eqn', '*tensors'], {}), '(eqn, *tensors, **kwargs)\n', (57024, 57049), False, 'from opt_einsum import contract, get_symbol\n'), ((58519, 58564), 'pennylane.math.reshape', 'qml.math.reshape', (['results', 'intermediate_shape'], {}), '(results, intermediate_shape)\n', (58535, 58564), True, 'import pennylane as qml\n'), ((58593, 58635), 'pennylane.grouping.partition_pauli_group', 'qml.grouping.partition_pauli_group', (['n_meas'], {}), '(n_meas)\n', (58627, 58635), True, 'import pennylane as qml\n'), ((58713, 58743), 'pennylane.math.argsort', 'qml.math.argsort', (['grouped_flat'], {}), '(grouped_flat)\n', (58729, 58743), True, 'import pennylane as qml\n'), ((59119, 59169), 'pennylane.math.reshape', 'qml.math.reshape', (['intermediate_tensor', 'final_shape'], {}), '(intermediate_tensor, final_shape)\n', (59135, 59169), True, 'import pennylane as qml\n'), ((59206, 59265), 
'pennylane.math.convert_like', 'qml.math.convert_like', (['CHANGE_OF_BASIS', 'intermediate_tensor'], {}), '(CHANGE_OF_BASIS, intermediate_tensor)\n', (59227, 59265), True, 'import pennylane as qml\n'), ((59822, 59865), 'pennylane.math.transpose', 'qml.math.transpose', (['final_tensor'], {'axes': 'axes'}), '(final_tensor, axes=axes)\n', (59840, 59865), True, 'import pennylane as qml\n'), ((59887, 59928), 'pennylane.math.power', 'qml.math.power', (['(2)', '(-(n_meas + n_prep) / 2)'], {}), '(2, -(n_meas + n_prep) / 2)\n', (59901, 59928), True, 'import pennylane as qml\n'), ((63742, 63771), 'pennylane.math.concatenate', 'qml.math.concatenate', (['results'], {}), '(results)\n', (63762, 63771), True, 'import pennylane as qml\n'), ((106422, 106439), 'kahypar.Context', 'kahypar.Context', ([], {}), '()\n', (106437, 106439), False, 'import kahypar\n'), ((107086, 107124), 'kahypar.partition', 'kahypar.partition', (['hypergraph', 'context'], {}), '(hypergraph, context)\n', (107103, 107124), False, 'import kahypar\n'), ((15498, 15511), 'copy.copy', 'copy.copy', (['op'], {}), '(op)\n', (15507, 15511), False, 'import copy\n'), ((15597, 15610), 'copy.copy', 'copy.copy', (['op'], {}), '(op)\n', (15606, 15610), False, 'import copy\n'), ((15706, 15719), 'pennylane.tape.QuantumTape', 'QuantumTape', ([], {}), '()\n', (15717, 15719), False, 'from pennylane.tape import QuantumTape\n'), ((22998, 23022), 'pennylane.Identity', 'qml.Identity', ([], {'wires': 'wire'}), '(wires=wire)\n', (23010, 23022), True, 'import pennylane as qml\n'), ((23060, 23082), 'pennylane.PauliX', 'qml.PauliX', ([], {'wires': 'wire'}), '(wires=wire)\n', (23070, 23082), True, 'import pennylane as qml\n'), ((23120, 23142), 'pennylane.PauliY', 'qml.PauliY', ([], {'wires': 'wire'}), '(wires=wire)\n', (23130, 23142), True, 'import pennylane as qml\n'), ((23180, 23202), 'pennylane.PauliZ', 'qml.PauliZ', ([], {'wires': 'wire'}), '(wires=wire)\n', (23190, 23202), True, 'import pennylane as qml\n'), ((27989, 28008), 'pennylane.math.flatten', 'qml.math.flatten', (['r'], {}), '(r)\n', (28005, 28008), True, 'import pennylane as qml\n'), ((32045, 32071), 'pennylane.numpy.hstack', 'np.hstack', (['sample_terminal'], {}), '(sample_terminal)\n', (32054, 32071), True, 'from pennylane import numpy as np\n'), ((32093, 32114), 'pennylane.numpy.hstack', 'np.hstack', (['sample_mid'], {}), '(sample_mid)\n', (32102, 32114), True, 'from pennylane import numpy as np\n'), ((32599, 32618), 'pennylane.numpy.prod', 'np.prod', (['sample_mid'], {}), '(sample_mid)\n', (32606, 32618), True, 'from pennylane import numpy as np\n'), ((32659, 32695), 'pennylane.numpy.prod', 'np.prod', (['[evals[s] for s in setting]'], {}), '([evals[s] for s in setting])\n', (32666, 32695), True, 'from pennylane import numpy as np\n'), ((32799, 32815), 'pennylane.numpy.mean', 'np.mean', (['expvals'], {}), '(expvals)\n', (32806, 32815), True, 'from pennylane import numpy as np\n'), ((48878, 48970), 'functools.partial', 'partial', (['qcut_processing_fn_sample'], {'communication_graph': 'communication_graph', 'shots': 'shots'}), '(qcut_processing_fn_sample, communication_graph=communication_graph,\n shots=shots)\n', (48885, 48970), False, 'from functools import partial\n'), ((50850, 51033), 'pennylane.execute', 'qml.execute', (['tapes'], {'device': 'qnode.device', 'gradient_fn': 'gradient_fn', 'interface': 'interface', 'max_diff': 'max_diff', 'override_shots': '(1)', 'gradient_kwargs': 'gradient_kwargs'}), '(tapes, device=qnode.device, gradient_fn=gradient_fn, interface=\n interface, 
max_diff=max_diff, override_shots=1, gradient_kwargs=\n gradient_kwargs, **execute_kwargs)\n', (50861, 51033), True, 'import pennylane as qml\n'), ((58752, 58795), 'pennylane.math.get_interface', 'qml.math.get_interface', (['intermediate_tensor'], {}), '(intermediate_tensor)\n', (58774, 58795), True, 'import pennylane as qml\n'), ((58889, 58941), 'pennylane.math.gather', 'qml.math.gather', (['intermediate_tensor', 'order'], {'axis': '(-1)'}), '(intermediate_tensor, order, axis=-1)\n', (58904, 58941), True, 'import pennylane as qml\n'), ((59344, 59404), 'pennylane.math.tensordot', 'qml.math.tensordot', (['change_of_basis', 'final_tensor'], {'axes': 'axes'}), '(change_of_basis, final_tensor, axes=axes)\n', (59362, 59404), True, 'import pennylane as qml\n'), ((77433, 77598), 'functools.partial', 'partial', (['qcut_processing_fn'], {'communication_graph': 'communication_graph', 'prepare_nodes': 'prepare_nodes', 'measure_nodes': 'measure_nodes', 'use_opt_einsum': 'use_opt_einsum'}), '(qcut_processing_fn, communication_graph=communication_graph,\n prepare_nodes=prepare_nodes, measure_nodes=measure_nodes,\n use_opt_einsum=use_opt_einsum)\n', (77440, 77598), False, 'from functools import partial\n'), ((81596, 81609), 'copy.copy', 'copy.copy', (['op'], {}), '(op)\n', (81605, 81609), False, 'import copy\n'), ((81654, 81667), 'copy.copy', 'copy.copy', (['op'], {}), '(op)\n', (81663, 81667), False, 'import copy\n'), ((81707, 81720), 'pennylane.tape.QuantumTape', 'QuantumTape', ([], {}), '()\n', (81718, 81720), False, 'from pennylane.tape import QuantumTape\n'), ((106110, 106212), 'kahypar.Hypergraph', 'kahypar.Hypergraph', (['nv', 'ne', 'edge_splits', 'adjacent_nodes', 'num_fragments', 'edge_weights', 'node_weights'], {}), '(nv, ne, edge_splits, adjacent_nodes, num_fragments,\n edge_weights, node_weights)\n', (106128, 106212), False, 'import kahypar\n'), ((106336, 106406), 'kahypar.Hypergraph', 'kahypar.Hypergraph', (['nv', 'ne', 'edge_splits', 'adjacent_nodes', 'num_fragments'], {}), '(nv, ne, edge_splits, adjacent_nodes, num_fragments)\n', (106354, 106406), False, 'import kahypar\n'), ((107303, 107339), 'itertools.compress', 'compress', (['graph.edges', 'cut_edge_mask'], {}), '(graph.edges, cut_edge_mask)\n', (107311, 107339), False, 'from itertools import compress, product\n'), ((109981, 110000), 'pennylane.ops.qubit.non_parametric_ops.WireCut', 'WireCut', ([], {'wires': 'wire'}), '(wires=wire)\n', (109988, 110000), False, 'from pennylane.ops.qubit.non_parametric_ops import WireCut\n'), ((111068, 111218), 'warnings.warn', 'warnings.warn', (['"""The circuit contains `MeasureNode` or `PrepareNode` operations that are not paired up correctly. Please check."""', 'UserWarning'], {}), "(\n 'The circuit contains `MeasureNode` or `PrepareNode` operations that are not paired up correctly. 
Please check.'\n , UserWarning)\n", (111081, 111218), False, 'import warnings\n'), ((15781, 15819), 'pennylane.wires.Wires', 'Wires', (['[wire_map[w] for w in op.wires]'], {}), '([wire_map[w] for w in op.wires])\n', (15786, 15819), False, 'from pennylane.wires import Wires\n'), ((15931, 15940), 'pennylane.apply', 'apply', (['op'], {}), '(op)\n', (15936, 15940), False, 'from pennylane import apply, expval\n'), ((18592, 18601), 'pennylane.expval', 'expval', (['g'], {}), '(g)\n', (18598, 18601), False, 'from pennylane import apply, expval\n'), ((29617, 29634), 'pennylane.numpy.hstack', 'np.hstack', (['sample'], {}), '(sample)\n', (29626, 29634), True, 'from pennylane import numpy as np\n'), ((29670, 29687), 'pennylane.numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (29678, 29687), True, 'from pennylane import numpy as np\n'), ((48635, 48796), 'functools.partial', 'partial', (['qcut_processing_fn_mc'], {'communication_graph': 'communication_graph', 'settings': 'settings', 'shots': 'shots', 'classical_processing_fn': 'classical_processing_fn'}), '(qcut_processing_fn_mc, communication_graph=communication_graph,\n settings=settings, shots=shots, classical_processing_fn=\n classical_processing_fn)\n', (48642, 48796), False, 'from functools import partial\n'), ((49316, 49345), 'inspect.signature', 'inspect.signature', (['qnode.func'], {}), '(qnode.func)\n', (49333, 49345), False, 'import inspect\n'), ((81786, 81824), 'pennylane.wires.Wires', 'Wires', (['[wire_map[w] for w in op.wires]'], {}), '([wire_map[w] for w in op.wires])\n', (81791, 81824), False, 'from pennylane.wires import Wires\n'), ((81871, 81880), 'pennylane.apply', 'apply', (['op'], {}), '(op)\n', (81876, 81880), False, 'from pennylane import apply, expval\n'), ((82268, 82279), 'pennylane.apply', 'apply', (['meas'], {}), '(meas)\n', (82273, 82279), False, 'from pennylane import apply, expval\n'), ((107005, 107032), 'pennylane.numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (107026, 107032), True, 'from pennylane import numpy as np\n'), ((1759, 1771), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1769, 1771), False, 'import uuid\n'), ((2064, 2076), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2074, 2076), False, 'import uuid\n'), ((7964, 8004), 'pennylane.measurements.MeasurementProcess', 'MeasurementProcess', (['m.return_type'], {'obs': 'o'}), '(m.return_type, obs=o)\n', (7982, 8004), False, 'from pennylane.measurements import Expectation, MeasurementProcess, Sample\n'), ((12722, 12757), 'networkx.has_path', 'has_path', (['communication_graph', 'i', 't'], {}), '(communication_graph, i, t)\n', (12730, 12757), False, 'from networkx import MultiDiGraph, has_path, weakly_connected_components\n'), ((17034, 17073), 'pennylane.wires.Wires', 'Wires', (['[wire_map[w] for w in obs.wires]'], {}), '([wire_map[w] for w in obs.wires])\n', (17039, 17073), False, 'from pennylane.wires import Wires\n'), ((18905, 18919), 'copy.copy', 'copy.copy', (['obs'], {}), '(obs)\n', (18914, 18919), False, 'import copy\n'), ((22179, 22192), 'pennylane.tape.QuantumTape', 'QuantumTape', ([], {}), '()\n', (22190, 22192), False, 'from pennylane.tape import QuantumTape\n'), ((26939, 26961), 'pennylane.tape.QuantumTape', 'qml.tape.QuantumTape', ([], {}), '()\n', (26959, 26961), True, 'import pennylane as qml\n'), ((32162, 32173), 'pennylane.numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (32170, 32173), True, 'from pennylane import numpy as np\n'), ((32175, 32186), 'pennylane.numpy.array', 'np.array', (['(1)'], {}), 
'(1)\n', (32183, 32186), True, 'from pennylane import numpy as np\n'), ((32230, 32242), 'pennylane.numpy.array', 'np.array', (['(-1)'], {}), '(-1)\n', (32238, 32242), True, 'from pennylane import numpy as np\n'), ((32244, 32255), 'pennylane.numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (32252, 32255), True, 'from pennylane import numpy as np\n'), ((82177, 82216), 'pennylane.wires.Wires', 'Wires', (['[wire_map[w] for w in obs.wires]'], {}), '([wire_map[w] for w in obs.wires])\n', (82182, 82216), False, 'from pennylane.wires import Wires\n'), ((98019, 98158), 'warnings.warn', 'warnings.warn', (['f"""The provided `k_lower={k_lower}` is less than the lowest allowed value, will override and set `k_lower={k_lb}`."""'], {}), "(\n f'The provided `k_lower={k_lower}` is less than the lowest allowed value, will override and set `k_lower={k_lb}`.'\n )\n", (98032, 98158), False, 'import warnings\n'), ((98309, 98412), 'warnings.warn', 'warnings.warn', (['f"""The attempted number of fragments seems high with lower bound at {k_lower}."""'], {}), "(\n f'The attempted number of fragments seems high with lower bound at {k_lower}.'\n )\n", (98322, 98412), False, 'import warnings\n'), ((17177, 17188), 'pennylane.apply', 'apply', (['meas'], {}), '(meas)\n', (17182, 17188), False, 'from pennylane import apply, expval\n'), ((17369, 17384), 'pennylane.expval', 'qml.expval', (['obs'], {}), '(obs)\n', (17379, 17384), True, 'import pennylane as qml\n'), ((21887, 21934), 'pennylane.grouping.string_to_pauli_word', 'string_to_pauli_word', (['paulis'], {'wire_map': 'wire_map'}), '(paulis, wire_map=wire_map)\n', (21907, 21934), False, 'from pennylane.grouping import string_to_pauli_word\n'), ((22499, 22524), 'pennylane.tape.stop_recording', 'qml.tape.stop_recording', ([], {}), '()\n', (22522, 22524), True, 'import pennylane as qml\n'), ((22667, 22678), 'pennylane.apply', 'apply', (['meas'], {}), '(meas)\n', (22672, 22678), False, 'from pennylane import apply, expval\n'), ((27335, 27350), 'pennylane.apply', 'qml.apply', (['meas'], {}), '(meas)\n', (27344, 27350), True, 'import pennylane as qml\n'), ((82048, 82087), 'pennylane.wires.Wires', 'Wires', (['[wire_map[w] for w in obs.wires]'], {}), '([wire_map[w] for w in obs.wires])\n', (82053, 82087), False, 'from pennylane.wires import Wires\n'), ((106478, 106492), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (106482, 106492), False, 'from pathlib import Path\n'), ((8193, 8220), 'pennylane.Projector', 'qml.Projector', (['[1]'], {'wires': 'w'}), '([1], wires=w)\n', (8206, 8220), True, 'import pennylane as qml\n'), ((17305, 17325), 'pennylane.operation.Tensor', 'Tensor', (['*observables'], {}), '(*observables)\n', (17311, 17325), False, 'from pennylane.operation import Operation, Operator, Tensor\n'), ((56303, 56318), 'opt_einsum.get_symbol', 'get_symbol', (['ctr'], {}), '(ctr)\n', (56313, 56318), False, 'from opt_einsum import contract, get_symbol\n'), ((119314, 119341), 'pennylane.numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (119335, 119341), True, 'from pennylane import numpy as np\n'), ((22467, 22476), 'pennylane.apply', 'apply', (['op'], {}), '(op)\n', (22472, 22476), False, 'from pennylane import apply, expval\n'), ((27253, 27266), 'pennylane.apply', 'qml.apply', (['op'], {}), '(op)\n', (27262, 27266), True, 'import pennylane as qml\n')]
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QProgressDialog
class ProgressBarDialog(QWidget):
def __init__(self, title='', label='', minValue=0, maxValue=100, parent=None):
super(ProgressBarDialog, self).__init__(parent)
self.process_bar = QProgressDialog(self)
self.set_bar_window_title(title)
self.set_label_text(label)
self.set_min_value(minValue)
self.set_max_value(maxValue)
self.process_bar.setWindowModality(Qt.WindowModal)
self.setGeometry(800, 300, 580, 570)
self.process_bar.canceled.connect(self.close_bar)
def set_bar_window_title(self, text):
self.process_bar.setWindowTitle(text)
self.setWindowTitle(text)
def set_label_text(self, text):
self.process_bar.setLabelText(text)
def set_min_value(self, minValue):
self.process_bar.setMinimum(minValue)
def set_max_value(self, maxvalue):
self.process_bar.setMaximum(maxvalue)
def set_value(self, value):
self.process_bar.setValue(value)
def close_bar(self):
self.process_bar.close()
def reset_bar(self):
self.process_bar = None
def show(self):
self.process_bar.show()
def is_valid(self):
return bool(self.process_bar)
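# Illustrative usage sketch (not part of the original file): PyQt5 requires a
# QApplication to exist before any widget is created; the title and label below
# are placeholders.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    bar = ProgressBarDialog(title='Export', label='Exporting frames...', minValue=0, maxValue=100)
    bar.show()
    bar.set_value(42)
    sys.exit(app.exec_())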
|
[
"PyQt5.QtWidgets.QProgressDialog"
] |
[((676, 697), 'PyQt5.QtWidgets.QProgressDialog', 'QProgressDialog', (['self'], {}), '(self)\n', (691, 697), False, 'from PyQt5.QtWidgets import QWidget, QProgressDialog\n')]
|
# Python Exercise 39: Write a program that reads a young person's year of birth and reports, according to their age, whether they still have to enlist for military service, whether it is exactly time to enlist, or whether the enlistment deadline has already passed. The program must also show how many years remain or how many years have passed since the deadline.
import datetime
current_year = datetime.datetime.today().year
ano_nasc = int(input('Enter your year of birth: '))
idade_alistamento = current_year - ano_nasc
if idade_alistamento < 18:
    print('It is not yet time to enlist.')
    print(f'You are only {idade_alistamento} years old; {18 - idade_alistamento} years to go. Wait a little longer!')
elif idade_alistamento == 18:
    print(f'You are already {idade_alistamento} years old.')
    print('You are at enlistment age. Do not waste time!')
else:
    print('You are past the enlistment deadline.')
    print(f'You are {idade_alistamento} years old; the deadline passed {idade_alistamento - 18} years ago. Regularize your situation!')
|
[
"datetime.datetime.today"
] |
[((348, 373), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (371, 373), False, 'import datetime\n')]
|
#######################################
# Import and initialize the libraries #
#######################################
from code.pygame_objects import *
from code.algorithm.bubblesort import bubblesort
from code.algorithm.insertionsort import insertionsort
from code.algorithm.bogosort import bogosort
from code.algorithm.mergesort import mergesort
from code.algorithm.quicksort import quicksort
from code.algorithm.radixsort import radixsort
from code.algorithm.selectionsort import selectionsort
from code.algorithm.commonFunc import commonFunc
#################
# Setup logging #
#################
filename = os.path.basename(__file__).split('.')[0]
logger = log.get_logger(filename)
logger.info('Loading up {}...'.format(filename))
sort_screen = screen(
name = 'sort',
surfaceParameters = {
'frame': coord(w=1024, h=768)
},
objectsParameters = {
'background': {
'type': 'object',
'frame': {
'image': coord(w=1024, h=768)
},
},
'sort_title': {
'type': 'title',
'frame': {
'image': coord(w=1024, h=135)
},
},
'back': {
'type': 'button',
'frame': {
'box': coord(x=71, y=41, w=112, h=61),
'image': coord(x=71, y=41, w=112, h=61)
},
'runclass': runclass(action='go_back')
},
'info': {
'type': 'button',
'frame': {
'box': coord(x=841, y=40, w=112, h=61),
'image': coord(x=841, y=40, w=112, h=61),
},
'runclass': runclass(action='info')
},
'speed': {
'type': 'text',
'frame': {
'image': coord(x=349, y=630, w=254, h=40),
'text': coord(x=349, y=630, w=254, h=40)
},
'data': text(
text = '10',
editable = False,
suffix = ' sec per move',
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'moves': {
'type': 'text',
'frame': {
'image': coord(x=436, y=677, w=112, h=40),
'text': coord(x=436, y=677, w=112, h=40)
},
'data': moves(
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'time_taken': {
'type': 'text',
'frame': {
'image': coord(x=768, y=630, w=177, h=40),
'text': coord(x=768, y=630, w=177, h=40)
},
'data': timer(
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'list_length': {
'type': 'text',
'frame': {
'image': coord(x=759, y=677, w=112, h=186),
'text': coord(x=759, y=677, w=112, h=186)
},
'data': text(
text = '100',
editable = False,
suffix = ' bars',
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'sortbox': {
'type': 'object',
'frame': {
'box': coord(x=52, y=145, w=922, h=430),
'image': coord(x=52, y=145, w=922, h=430)
},
'data': sortbars(
bars=10,
),
'dataAddSelf': True,
}
}
)
runSort = {
'Bubble sort': bubblesort.run,
'Insertion sort': insertionsort.run,
'Merge sort': mergesort.run,
'Quick sort': quicksort.run,
'Radix sort': radixsort.run,
'Bogo sort': bogosort.run,
'Selection sort': selectionsort.run
}
class sort:
@staticmethod
def run(screen, sortType:str, bars:int, speed:float):
# Set data from parent
sort_screen.objects.sort_title.switchState(sortType, withDisplay=False)
if sort_screen.objects.sortbox.data.bars != int(bars): sort_screen.objects.sortbox.data.bars = int(bars)
else: sort_screen.objects.sortbox.data.genBars()
sort_screen.objects.speed.data.setText(str(speed), withDisplay=False)
sort_screen.objects.list_length.data.setText(str(bars), withDisplay=False)
sort_screen.objects.moves.data.reset()
sort_screen.objects.time_taken.data.resetTimer()
# Display sort screen
sort_screen.surface.display()
# Buffer time before sort starts
action_result = commonFunc.waitAction(sort_screen, 0.5)
if action_result != None: return action_result
sort_result = runSort[sortType](sort_screen, speed)
if sort_result != None: return sort_result
while True:
# Get check for interaction with screen
action_result = sort_screen.event.action()
# No action
if action_result == None: continue
# When program is set to close
if action_result.contains('outcome','__quit__'): return '__quit__'
# Going back
if action_result.contains('outcome', 'go_back'): return '__back__'
# Load back screen
if action_result.contains('outcome', '__back__'): sort_screen.surface.display(withLoad=False)
|
[
"code.algorithm.commonFunc.commonFunc.waitAction"
] |
[((5149, 5188), 'code.algorithm.commonFunc.commonFunc.waitAction', 'commonFunc.waitAction', (['sort_screen', '(0.5)'], {}), '(sort_screen, 0.5)\n', (5170, 5188), False, 'from code.algorithm.commonFunc import commonFunc\n')]
|
#!/usr/bin/env python
import codecs
from os import path
from setuptools import setup
pwd = path.abspath(path.dirname(__file__))
with codecs.open(path.join(pwd, 'README.md'), 'r', encoding='utf8') as input:
long_description = input.read()
version='1.7'
setup(
name='Perdy',
version=version,
license='MIT',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/eddo888/perdy',
download_url='https://github.com/eddo888/perdy/archive/%s.tar.gz'%version,
author='<NAME>',
author_email='<EMAIL>',
packages=[
'Perdy',
],
install_requires=[
'pytz',
'arrow',
'xmltodict',
'PyYAML',
'jsonpath',
'argcomplete',
'Baubles',
],
scripts=[
'bin/parser.py',
'bin/pyson.py',
'bin/colourize.py',
],
)
|
[
"os.path.join",
"os.path.dirname",
"setuptools.setup"
] |
[((261, 761), 'setuptools.setup', 'setup', ([], {'name': '"""Perdy"""', 'version': 'version', 'license': '"""MIT"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/eddo888/perdy"""', 'download_url': "('https://github.com/eddo888/perdy/archive/%s.tar.gz' % version)", 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['Perdy']", 'install_requires': "['pytz', 'arrow', 'xmltodict', 'PyYAML', 'jsonpath', 'argcomplete', 'Baubles']", 'scripts': "['bin/parser.py', 'bin/pyson.py', 'bin/colourize.py']"}), "(name='Perdy', version=version, license='MIT', long_description=\n long_description, long_description_content_type='text/markdown', url=\n 'https://github.com/eddo888/perdy', download_url=\n 'https://github.com/eddo888/perdy/archive/%s.tar.gz' % version, author=\n '<NAME>', author_email='<EMAIL>', packages=['Perdy'], install_requires=\n ['pytz', 'arrow', 'xmltodict', 'PyYAML', 'jsonpath', 'argcomplete',\n 'Baubles'], scripts=['bin/parser.py', 'bin/pyson.py', 'bin/colourize.py'])\n", (266, 761), False, 'from setuptools import setup\n'), ((106, 128), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (118, 128), False, 'from os import path\n'), ((147, 174), 'os.path.join', 'path.join', (['pwd', '"""README.md"""'], {}), "(pwd, 'README.md')\n", (156, 174), False, 'from os import path\n')]
|
from tabulate import tabulate
from slack.errors import SlackApiError
import sys
import logging
import slack
class Slackalert:
"""To send cost report on slack."""
def __init__(self, channel=None, slack_token=None):
self.channel = channel
self.slack_token = slack_token
logging.basicConfig(level=logging.WARNING)
self.logger = logging.getLogger()
def get_resource_list(self, resource_name, resource_info, resource_header, resource_list, resource_savings):
"""Returns all the idle resource information in a dictionary format."""
resource_list.insert(0, resource_header)
resource_info[resource_name] = {}
resource_info[resource_name]['Resources'] = resource_list
resource_info[resource_name]['Savings'] = resource_savings
return resource_info
def slack_alert(self, resource_info, account_name, total_savings):
"""Creates a txt file which contains the cost report and sends to the slack channel."""
try:
client = slack.WebClient(token=self.slack_token)
f = open("/tmp/cost_optimization_report.txt", "w+")
for res in resource_info.keys():
#Converts resource info dictionary to tabular format.
f.write('\n' + 'Resource: ' + res + '\n')
resource_table = tabulate(resource_info[res]['Resources'][1:],
headers=resource_info[res]['Resources'][0], tablefmt="grid",
disable_numparse=True)
f.write('\n' + resource_table + '\n \n' + 'Savings: $' + str(resource_info[res]['Savings']) + '\n')
f.close()
response = client.files_upload(
file='/tmp/cost_optimization_report.txt',
initial_comment='Cost Optimization Report | ' + account_name + ' | Total Savings: $' + str(total_savings),
channels=self.channel
)
print("Sending the Cost Optimization report to slack "+ self.channel)
except SlackApiError as e:
"""You will get a SlackApiError if "ok" is False."""
assert e.response["ok"] is False
assert e.response["error"]
"""str like 'invalid_auth', 'channel_not_found'."""
self.logger.error("Slack api error: {e.response['error']} | Error in slack_send.py")
sys.exit(1)
except Exception as e:
self.logger.error(
"Error on line {} in slack_send.py".format(sys.exc_info()[-1].tb_lineno) + " | Message: " +
str(e))
sys.exit(1)
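# Illustrative usage sketch (not part of the original file): the channel, token
# and resource rows below are placeholders; a valid bot token is required for the
# upload to succeed.
if __name__ == "__main__":
    alert = Slackalert(channel="#cost-reports", slack_token="xoxb-placeholder")
    report = alert.get_resource_list(
        resource_name="EBS Volumes",
        resource_info={},
        resource_header=["Volume Id", "Region", "Monthly Cost ($)"],
        resource_list=[["vol-0abc123", "us-east-1", "8.00"]],
        resource_savings=8.00,
    )
    alert.slack_alert(report, account_name="dev-account", total_savings=8.00)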
|
[
"logging.basicConfig",
"tabulate.tabulate",
"logging.getLogger",
"sys.exc_info",
"slack.WebClient",
"sys.exit"
] |
[((302, 344), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARNING'}), '(level=logging.WARNING)\n', (321, 344), False, 'import logging\n'), ((367, 386), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (384, 386), False, 'import logging\n'), ((1037, 1076), 'slack.WebClient', 'slack.WebClient', ([], {'token': 'self.slack_token'}), '(token=self.slack_token)\n', (1052, 1076), False, 'import slack\n'), ((1356, 1490), 'tabulate.tabulate', 'tabulate', (["resource_info[res]['Resources'][1:]"], {'headers': "resource_info[res]['Resources'][0]", 'tablefmt': '"""grid"""', 'disable_numparse': '(True)'}), "(resource_info[res]['Resources'][1:], headers=resource_info[res][\n 'Resources'][0], tablefmt='grid', disable_numparse=True)\n", (1364, 1490), False, 'from tabulate import tabulate\n'), ((2427, 2438), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2435, 2438), False, 'import sys\n'), ((2646, 2657), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2654, 2657), False, 'import sys\n'), ((2561, 2575), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2573, 2575), False, 'import sys\n')]
|
import logging
from abc import ABC, abstractmethod
from pony.orm import db_session, commit
log = logging.getLogger(__name__)
class Importer(ABC):
def __init__(self, TargetEntity):
self.TargetEntity = TargetEntity
@db_session
def truncate(self):
log.info('Truncating target tables...')
self.TargetEntity.select().delete(bulk=True)
commit()
log.info('...done!')
@abstractmethod
def __iter__(self):
"""iterate over items to be imported"""
return
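# Illustrative subclass sketch (not part of the original module): the rows and the
# entity passed in are placeholders; concrete importers only need to implement
# __iter__.
class ListImporter(Importer):
    """Example importer that yields pre-built dicts for the target entity."""

    def __init__(self, TargetEntity, rows):
        super().__init__(TargetEntity)
        self.rows = rows

    def __iter__(self):
        return iter(self.rows)

# Typical flow (with a hypothetical Pony ORM entity `Item`):
#   importer = ListImporter(Item, [{"name": "a"}, {"name": "b"}])
#   importer.truncate()
#   for item in importer:
#       ...  # persist each yielded item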
|
[
"logging.getLogger",
"pony.orm.commit"
] |
[((99, 126), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (116, 126), False, 'import logging\n'), ((380, 388), 'pony.orm.commit', 'commit', ([], {}), '()\n', (386, 388), False, 'from pony.orm import db_session, commit\n')]
|
# Generated by Django 3.2.7 on 2021-10-09 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pybo', '0005_auto_20211010_0320'),
]
operations = [
migrations.AddField(
model_name='issue',
name='agree_representor_id',
field=models.CharField(default='', max_length=20, null=True),
),
migrations.AddField(
model_name='issue',
name='disagree_representor_id',
field=models.CharField(default='', max_length=20, null=True),
),
migrations.AlterField(
model_name='issue',
name='agree_representor',
field=models.CharField(default='', max_length=20, null=True),
),
migrations.AlterField(
model_name='issue',
name='disagree_representor',
field=models.CharField(default='', max_length=20, null=True),
),
]
|
[
"django.db.models.CharField"
] |
[((344, 398), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(20)', 'null': '(True)'}), "(default='', max_length=20, null=True)\n", (360, 398), False, 'from django.db import migrations, models\n'), ((534, 588), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(20)', 'null': '(True)'}), "(default='', max_length=20, null=True)\n", (550, 588), False, 'from django.db import migrations, models\n'), ((720, 774), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(20)', 'null': '(True)'}), "(default='', max_length=20, null=True)\n", (736, 774), False, 'from django.db import migrations, models\n'), ((909, 963), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(20)', 'null': '(True)'}), "(default='', max_length=20, null=True)\n", (925, 963), False, 'from django.db import migrations, models\n')]
|
import json
import os
def get_file_index(filesProcessed):
new_dict = {}
for f in filesProcessed:
new_dict[f]={"framerate": 30.0, "selected": {"0": 1, "9000": 0}}
return new_dict
ref = json.load(open("/home/lijun/downloads/kf1_meta/references/kf1_all.json","r"))
files = ref["filesProcessed"]
print(len(files))
output = json.load(open("/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/output.json","r"))
output["filesProcessed"] = files
jname = "/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/output-mod.json"
with open(jname,'w') as j:
json.dump(output,j,indent=2,ensure_ascii=False)
file_dict = get_file_index(files)
jname = "/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/file-index.json"
with open(jname,'w') as j:
json.dump(file_dict,j,indent=2,ensure_ascii=False)
|
[
"json.dump"
] |
[((597, 647), 'json.dump', 'json.dump', (['output', 'j'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(output, j, indent=2, ensure_ascii=False)\n', (606, 647), False, 'import json\n'), ((801, 854), 'json.dump', 'json.dump', (['file_dict', 'j'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(file_dict, j, indent=2, ensure_ascii=False)\n', (810, 854), False, 'import json\n')]
|
from enum import Enum, auto
import funcy as fn
import numpy as np
from monotone_bipartition import rectangles as mdtr
from monotone_bipartition import refine
EPS = 1e-4
class SearchResultType(Enum):
TRIVIALLY_FALSE = auto()
TRIVIALLY_TRUE = auto()
NON_TRIVIAL = auto()
def diagonal_convex_comb(r):
bot, top = np.array(r.bot), np.array(r.top)
diag = top - bot
return lambda t: bot + t * diag
def binsearch(r, oracle, eps=EPS, find_lambda=False):
"""Binary search over the diagonal of the rectangle.
Returns the lower and upper approximation on the diagonal.
"""
f = diagonal_convex_comb(r)
feval = fn.compose(oracle, f)
lo, hi = 0, 1
# Early termination via bounds checks
if feval(lo):
result_type = SearchResultType.TRIVIALLY_TRUE
hi = 0
elif not feval(hi):
result_type = SearchResultType.TRIVIALLY_FALSE
else:
result_type = SearchResultType.NON_TRIVIAL
mid = lo
while hi - lo > eps:
mid = lo + (hi - lo) / 2
lo, hi = (lo, mid) if feval(mid) else (mid, hi)
if find_lambda:
if result_type == SearchResultType.TRIVIALLY_TRUE:
return result_type, -1
elif result_type == SearchResultType.TRIVIALLY_FALSE:
return result_type, 2
return result_type, (lo+hi)/2
else:
return result_type, mdtr.to_rec(zip(f(lo), f(hi)))
def line_intersect(func, point, tol, *, percent=False):
box_intersect = np.array(point) / max(point)
origin = [0]*len(point)
rec = mdtr.to_rec(zip(origin, box_intersect)) # Compute bounding rec.
return binsearch(rec, func, eps=tol, find_lambda=percent)[1]
def lexicographic_opt(func, ordering, tol):
dim = len(ordering)
assert set(fn.pluck(0, ordering)) == set(range(dim))
tol /= dim # Need to compensate for multiple binsearches.
rec = refine.bounding_box(
domain=mdtr.unit_rec(dim),
oracle=func
)
# If polarity is True, set initial value at bounding.top.
# O.w. use bounding.bot.
base = tuple((rec.top if p else rec.bot)[i] for i, p in sorted(ordering))
res_rec = mdtr.to_rec(zip(base, base))
for idx, polarity in ordering:
oracle = func
rec = mdtr.to_rec(
(0, 1) if i == idx else (p, p) for i, p in enumerate(base)
)
result_type, res_cand = binsearch(rec, oracle, eps=tol)
if result_type == SearchResultType.NON_TRIVIAL:
res_rec = res_cand
base = res_rec.bot
return res_rec
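# Hedged usage sketch (added for illustration, not part of the original
# module): it exercises binsearch on an assumed unit square from
# mdtr.unit_rec(2) with a monotone oracle that flips along the diagonal, and
# is guarded so importing the module is unaffected.
if __name__ == "__main__":
    demo_rec = mdtr.unit_rec(2)  # assumption: unit box with bot = (0, 0), top = (1, 1)
    kind, approx = binsearch(demo_rec, lambda p: p[0] + p[1] >= 1)
    # Expected: SearchResultType.NON_TRIVIAL, with approx straddling (0.5, 0.5).
    print(kind, approx)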
|
[
"enum.auto",
"funcy.pluck",
"funcy.compose",
"numpy.array",
"monotone_bipartition.rectangles.unit_rec"
] |
[((226, 232), 'enum.auto', 'auto', ([], {}), '()\n', (230, 232), False, 'from enum import Enum, auto\n'), ((254, 260), 'enum.auto', 'auto', ([], {}), '()\n', (258, 260), False, 'from enum import Enum, auto\n'), ((279, 285), 'enum.auto', 'auto', ([], {}), '()\n', (283, 285), False, 'from enum import Enum, auto\n'), ((651, 672), 'funcy.compose', 'fn.compose', (['oracle', 'f'], {}), '(oracle, f)\n', (661, 672), True, 'import funcy as fn\n'), ((332, 347), 'numpy.array', 'np.array', (['r.bot'], {}), '(r.bot)\n', (340, 347), True, 'import numpy as np\n'), ((349, 364), 'numpy.array', 'np.array', (['r.top'], {}), '(r.top)\n', (357, 364), True, 'import numpy as np\n'), ((1500, 1515), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (1508, 1515), True, 'import numpy as np\n'), ((1782, 1803), 'funcy.pluck', 'fn.pluck', (['(0)', 'ordering'], {}), '(0, ordering)\n', (1790, 1803), True, 'import funcy as fn\n'), ((1934, 1952), 'monotone_bipartition.rectangles.unit_rec', 'mdtr.unit_rec', (['dim'], {}), '(dim)\n', (1947, 1952), True, 'from monotone_bipartition import rectangles as mdtr\n')]
|
from flask import url_for
from flaskcbv.view import View
from flaskcbv.conf import settings
from misc.mixins import HelperMixin
from misc.views import JSONView
class authView(JSONView):
def helper(self):
return """Authorizaion handler
Use "login" and "passwd" arguments by GET or POST to get session
"""
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
def post(self, *args, **kwargs):
try:
username = self.get_argument_smart('username')
passwd = self.get_argument_smart('password')
except Exception as err:
            self.abort_error(errno=-1, error="wrong_params", details="set arguments: 'username', 'password'")
r = settings._BB_CLIENT.login(username, passwd)
answ = r.as_dict
del answ["cmd"]
del answ["token"]
self.abort_error(**answ)
class sessionView(JSONView):
def helper(self):
return """Session check handler
Use "session" argument by GET or POST to check your session
"""
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
def post(self, *args, **kwargs):
try:
session = self.get_argument_smart('session')
except Exception as err:
self.abort_error(errno=-1, error="wrong_params", details="set argument: 'session'")
r = settings._BB_CLIENT.session(session)
answ = r.as_dict
del answ["cmd"]
del answ["token"]
self.abort_error(**answ)
|
[
"flaskcbv.conf.settings._BB_CLIENT.session",
"flaskcbv.conf.settings._BB_CLIENT.login"
] |
[((749, 792), 'flaskcbv.conf.settings._BB_CLIENT.login', 'settings._BB_CLIENT.login', (['username', 'passwd'], {}), '(username, passwd)\n', (774, 792), False, 'from flaskcbv.conf import settings\n'), ((1413, 1449), 'flaskcbv.conf.settings._BB_CLIENT.session', 'settings._BB_CLIENT.session', (['session'], {}), '(session)\n', (1440, 1449), False, 'from flaskcbv.conf import settings\n')]
|
import morepath
from webtest import TestApp as Client
def test_implicit_function():
class app(morepath.App):
@morepath.dispatch_method()
def one(self):
return "Default one"
@morepath.dispatch_method()
def two(self):
return "Default two"
@app.path(path='')
class Model(object):
def __init__(self):
pass
@app.method(app.one)
def one_impl(self):
return self.two()
@app.method(app.two)
def two_impl(self):
return "The real two"
@app.view(model=Model)
def default(self, request):
return request.app.one()
c = Client(app())
response = c.get('/')
assert response.body == b'The real two'
def test_implicit_function_mounted():
class base(morepath.App):
@morepath.dispatch_method()
def one(self):
return "Default one"
@morepath.dispatch_method()
def two(self):
return "Default two"
class alpha(base):
pass
class beta(base):
def __init__(self, id):
self.id = id
@alpha.mount(path='mounted/{id}', app=beta)
def mount_beta(id):
return beta(id=id)
class AlphaRoot(object):
pass
class Root(object):
def __init__(self, id):
self.id = id
@alpha.path(path='/', model=AlphaRoot)
def get_alpha_root():
return AlphaRoot()
@beta.path(path='/', model=Root)
def get_root(app):
return Root(app.id)
@beta.method(base.one)
def one_impl(self):
return self.two()
@beta.method(base.two)
def two_impl(self):
return "The real two"
@alpha.view(model=AlphaRoot)
def alpha_default(self, request):
return request.app.one()
@beta.view(model=Root)
def default(self, request):
return "View for %s, message: %s" % (self.id, request.app.one())
c = Client(alpha())
response = c.get('/mounted/1')
assert response.body == b'View for 1, message: The real two'
response = c.get('/')
assert response.body == b'Default one'
|
[
"morepath.dispatch_method"
] |
[((124, 150), 'morepath.dispatch_method', 'morepath.dispatch_method', ([], {}), '()\n', (148, 150), False, 'import morepath\n'), ((217, 243), 'morepath.dispatch_method', 'morepath.dispatch_method', ([], {}), '()\n', (241, 243), False, 'import morepath\n'), ((816, 842), 'morepath.dispatch_method', 'morepath.dispatch_method', ([], {}), '()\n', (840, 842), False, 'import morepath\n'), ((909, 935), 'morepath.dispatch_method', 'morepath.dispatch_method', ([], {}), '()\n', (933, 935), False, 'import morepath\n')]
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
convolve_grayscale_padding = __import__(
'2-convolve_grayscale_padding').convolve_grayscale_padding
if __name__ == '__main__':
dataset = np.load('../../supervised_learning/data/MNIST.npz')
images = dataset['X_train']
print(images.shape)
kernel = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
images_conv = convolve_grayscale_padding(images, kernel, (2, 4))
print(images_conv.shape)
plt.imshow(images[0], cmap='gray')
plt.show()
plt.imshow(images_conv[0], cmap='gray')
plt.show()
|
[
"matplotlib.pyplot.imshow",
"numpy.array",
"numpy.load",
"matplotlib.pyplot.show"
] |
[((223, 274), 'numpy.load', 'np.load', (['"""../../supervised_learning/data/MNIST.npz"""'], {}), "('../../supervised_learning/data/MNIST.npz')\n", (230, 274), True, 'import numpy as np\n'), ((344, 390), 'numpy.array', 'np.array', (['[[1, 0, -1], [1, 0, -1], [1, 0, -1]]'], {}), '([[1, 0, -1], [1, 0, -1], [1, 0, -1]])\n', (352, 390), True, 'import numpy as np\n'), ((494, 528), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images[0]'], {'cmap': '"""gray"""'}), "(images[0], cmap='gray')\n", (504, 528), True, 'import matplotlib.pyplot as plt\n'), ((533, 543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (541, 543), True, 'import matplotlib.pyplot as plt\n'), ((548, 587), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images_conv[0]'], {'cmap': '"""gray"""'}), "(images_conv[0], cmap='gray')\n", (558, 587), True, 'import matplotlib.pyplot as plt\n'), ((592, 602), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (600, 602), True, 'import matplotlib.pyplot as plt\n')]
|
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data=pd.read_csv(path)
data.rename(columns={'Total':'Total_Medals'},inplace =True)
data.head(10)
#Code starts here
# --------------
try:
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'] , 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] ==data['Total_Winter'],'Both',data['Better_Event'])
#print(data['Better_Event'])
Total_Count=data['Better_Event'].value_counts()
if(Total_Count[0]>Total_Count[1]):
better_event='Summer'
print(better_event)
print(data)
else:
better_event='Winter'
print(better_event)
except:
print("code Failed")
else:
print("code passed Successfully")
# --------------
#Code starts here
top_countries= data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries=top_countries[:-1]
#print(top_countries)
def top_ten(Col):
country_list= list((data.nlargest(11,Col)['Country_Name']))
country_list=country_list[1:]
print(country_list)
return country_list
top_10_summer=top_ten('Total_Summer')
top_10_winter =top_ten('Total_Winter')
top_10 =top_ten('Total_Medals')
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print("common",common)
# --------------
#Code starts here
summer_df =data[data['Country_Name'].isin(top_10_summer)]
winter_df =data[data['Country_Name'].isin(top_10_winter)]
top_df =data[data['Country_Name'].isin(top_10)]
# --------------
#Code starts here
summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio=max(summer_df['Golden_Ratio'])
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio=max(winter_df['Golden_Ratio'])
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio=max(top_df['Golden_Ratio'])
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
# --------------
#Code starts here
data_1=data[:-1]
data_1['Total_Points']=pd.Series(data_1['Gold_Total']*3+data_1['Silver_Total']*2+data_1['Bronze_Total'])
print(data_1['Total_Points'])
most_points = max(data_1['Total_Points'])
print(most_points)
best_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
print(most_points)
print(best_country)
# --------------
#Code starts here
best = pd.DataFrame(data[data['Country_Name']==best_country])
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar()
plt.xlabel('United States')
plt.ylabel('Medals Tally')
# Rotate X-axes labels
plt.xticks(rotation=45)
|
[
"pandas.Series",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"numpy.where",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame"
] |
[((142, 159), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (153, 159), True, 'import pandas as pd\n'), ((2373, 2467), 'pandas.Series', 'pd.Series', (["(data_1['Gold_Total'] * 3 + data_1['Silver_Total'] * 2 + data_1['Bronze_Total']\n )"], {}), "(data_1['Gold_Total'] * 3 + data_1['Silver_Total'] * 2 + data_1[\n 'Bronze_Total'])\n", (2382, 2467), True, 'import pandas as pd\n'), ((2711, 2767), 'pandas.DataFrame', 'pd.DataFrame', (["data[data['Country_Name'] == best_country]"], {}), "(data[data['Country_Name'] == best_country])\n", (2723, 2767), True, 'import pandas as pd\n'), ((2843, 2870), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""United States"""'], {}), "('United States')\n", (2853, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2872, 2898), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals Tally"""'], {}), "('Medals Tally')\n", (2882, 2898), True, 'import matplotlib.pyplot as plt\n'), ((2924, 2947), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2934, 2947), True, 'import matplotlib.pyplot as plt\n'), ((311, 384), 'numpy.where', 'np.where', (["(data['Total_Summer'] > data['Total_Winter'])", '"""Summer"""', '"""Winter"""'], {}), "(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')\n", (319, 384), True, 'import numpy as np\n'), ((413, 502), 'numpy.where', 'np.where', (["(data['Total_Summer'] == data['Total_Winter'])", '"""Both"""', "data['Better_Event']"], {}), "(data['Total_Summer'] == data['Total_Winter'], 'Both', data[\n 'Better_Event'])\n", (421, 502), True, 'import numpy as np\n')]
|
# Generated by Django 2.2.4 on 2019-08-14 09:13
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("order", "0071_order_gift_cards")]
operations = [
migrations.RenameField(
model_name="order",
old_name="shipping_price_gross",
new_name="shipping_price_gross_amount",
),
migrations.RenameField(
model_name="order",
old_name="shipping_price_net",
new_name="shipping_price_net_amount",
),
migrations.RenameField(
model_name="order", old_name="total_gross", new_name="total_gross_amount"
),
migrations.RenameField(
model_name="order", old_name="total_net", new_name="total_net_amount"
),
migrations.RenameField(
model_name="orderline",
old_name="unit_price_gross",
new_name="unit_price_gross_amount",
),
migrations.RenameField(
model_name="orderline",
old_name="unit_price_net",
new_name="unit_price_net_amount",
),
migrations.AddField(
model_name="order",
name="currency",
field=models.CharField(
default=settings.DEFAULT_CURRENCY,
max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
),
),
migrations.AddField(
model_name="orderline",
name="currency",
field=models.CharField(
default=settings.DEFAULT_CURRENCY,
max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
),
),
]
|
[
"django.db.migrations.RenameField",
"django.db.models.CharField"
] |
[((249, 368), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""order"""', 'old_name': '"""shipping_price_gross"""', 'new_name': '"""shipping_price_gross_amount"""'}), "(model_name='order', old_name='shipping_price_gross',\n new_name='shipping_price_gross_amount')\n", (271, 368), False, 'from django.db import migrations, models\n'), ((421, 536), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""order"""', 'old_name': '"""shipping_price_net"""', 'new_name': '"""shipping_price_net_amount"""'}), "(model_name='order', old_name='shipping_price_net',\n new_name='shipping_price_net_amount')\n", (443, 536), False, 'from django.db import migrations, models\n'), ((589, 691), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""order"""', 'old_name': '"""total_gross"""', 'new_name': '"""total_gross_amount"""'}), "(model_name='order', old_name='total_gross', new_name\n ='total_gross_amount')\n", (611, 691), False, 'from django.db import migrations, models\n'), ((718, 816), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""order"""', 'old_name': '"""total_net"""', 'new_name': '"""total_net_amount"""'}), "(model_name='order', old_name='total_net', new_name=\n 'total_net_amount')\n", (740, 816), False, 'from django.db import migrations, models\n'), ((843, 958), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""orderline"""', 'old_name': '"""unit_price_gross"""', 'new_name': '"""unit_price_gross_amount"""'}), "(model_name='orderline', old_name='unit_price_gross',\n new_name='unit_price_gross_amount')\n", (865, 958), False, 'from django.db import migrations, models\n'), ((1011, 1122), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""orderline"""', 'old_name': '"""unit_price_net"""', 'new_name': '"""unit_price_net_amount"""'}), "(model_name='orderline', old_name='unit_price_net',\n new_name='unit_price_net_amount')\n", (1033, 1122), False, 'from django.db import migrations, models\n'), ((1275, 1381), 'django.db.models.CharField', 'models.CharField', ([], {'default': 'settings.DEFAULT_CURRENCY', 'max_length': 'settings.DEFAULT_CURRENCY_CODE_LENGTH'}), '(default=settings.DEFAULT_CURRENCY, max_length=settings.\n DEFAULT_CURRENCY_CODE_LENGTH)\n', (1291, 1381), False, 'from django.db import migrations, models\n'), ((1548, 1654), 'django.db.models.CharField', 'models.CharField', ([], {'default': 'settings.DEFAULT_CURRENCY', 'max_length': 'settings.DEFAULT_CURRENCY_CODE_LENGTH'}), '(default=settings.DEFAULT_CURRENCY, max_length=settings.\n DEFAULT_CURRENCY_CODE_LENGTH)\n', (1564, 1654), False, 'from django.db import migrations, models\n')]
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ExternalMaster(object):
"""
An external master name server used as the source of zone data.
"""
def __init__(self, **kwargs):
"""
Initializes a new ExternalMaster object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param address:
The value to assign to the address property of this ExternalMaster.
:type address: str
:param port:
The value to assign to the port property of this ExternalMaster.
:type port: int
:param tsig_key_id:
The value to assign to the tsig_key_id property of this ExternalMaster.
:type tsig_key_id: str
"""
self.swagger_types = {
'address': 'str',
'port': 'int',
'tsig_key_id': 'str'
}
self.attribute_map = {
'address': 'address',
'port': 'port',
'tsig_key_id': 'tsigKeyId'
}
self._address = None
self._port = None
self._tsig_key_id = None
@property
def address(self):
"""
**[Required]** Gets the address of this ExternalMaster.
The server's IP address (IPv4 or IPv6).
:return: The address of this ExternalMaster.
:rtype: str
"""
return self._address
@address.setter
def address(self, address):
"""
Sets the address of this ExternalMaster.
The server's IP address (IPv4 or IPv6).
:param address: The address of this ExternalMaster.
:type: str
"""
self._address = address
@property
def port(self):
"""
Gets the port of this ExternalMaster.
The server's port. Port value must be a value of 53, otherwise omit
the port value.
:return: The port of this ExternalMaster.
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port of this ExternalMaster.
The server's port. Port value must be a value of 53, otherwise omit
the port value.
:param port: The port of this ExternalMaster.
:type: int
"""
self._port = port
@property
def tsig_key_id(self):
"""
Gets the tsig_key_id of this ExternalMaster.
The OCID of the TSIG key.
:return: The tsig_key_id of this ExternalMaster.
:rtype: str
"""
return self._tsig_key_id
@tsig_key_id.setter
def tsig_key_id(self, tsig_key_id):
"""
Sets the tsig_key_id of this ExternalMaster.
The OCID of the TSIG key.
:param tsig_key_id: The tsig_key_id of this ExternalMaster.
:type: str
"""
self._tsig_key_id = tsig_key_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
[
"oci.util.formatted_flat_dict"
] |
[((3460, 3485), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (3479, 3485), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')]
|
#!/usr/bin/env python3
import os
import sys
import textwrap
self_path = os.path.dirname(os.path.realpath(__file__));
f = open(self_path + "/unicode/CaseFolding.txt", "r")
status_list = [ "C", "F" ]
folding_list = [ dict(), dict(), dict() ]
# Filter the foldings for "full" folding.
for line in f:
comment_off = line.find("#")
if comment_off >= 0:
line = line[:comment_off]
line = line.strip()
if not line:
continue
raw_codepoint, status, raw_mapping, ignored_tail = line.split(";", 3)
if not status.strip() in status_list:
continue
codepoint = int(raw_codepoint.strip(), 16)
mapping = [int(it, 16) for it in raw_mapping.strip().split(" ")]
mapping_len = len(mapping)
if mapping_len in range(1, 4):
folding_list[mapping_len-1][codepoint] = mapping
else:
assert(False)
f.close()
# If we assume that (index0 ... index-1) makes a range (as defined below),
# check that the newly provided index is compatible with the range too; i.e.
# verify that the range can be extended without breaking its properties.
#
# Currently, we can handle ranges which:
#
# (1) either form consecutive sequence of codepoints and which map that range
# to other consecutive range of codepoints (of the same length);
#
# (2) or a consecutive sequence of codepoints with step 2 where each codepoint
# CP is mapped to the codepoint CP+1
# (e.g. 0x1234 -> 0x1235; 0x1236 -> 0x1237; 0x1238 -> 0x1239; ...).
#
# Note: When the codepoints in the range are mapped to multiple codepoints,
# only the 1st mapped codepoint is considered. All the other ones have to be
# shared by all the mappings covered by the range.
def is_range_compatible(folding, codepoint_list, index0, index):
N = index - index0
codepoint0 = codepoint_list[index0]
codepoint1 = codepoint_list[index0+1]
codepointN = codepoint_list[index]
mapping0 = folding[codepoint0]
mapping1 = folding[codepoint1]
mappingN = folding[codepointN]
# Check the range type (1):
if codepoint1 - codepoint0 == 1 and codepointN - codepoint0 == N \
and mapping1[0] - mapping0[0] == 1 and mapping1[1:] == mapping0[1:] \
and mappingN[0] - mapping0[0] == N and mappingN[1:] == mapping0[1:]:
return True
# Check the range type (2):
if codepoint1 - codepoint0 == 2 and codepointN - codepoint0 == 2 * N \
and mapping0[0] - codepoint0 == 1 \
and mapping1[0] - codepoint1 == 1 and mapping1[1:] == mapping0[1:] \
and mappingN[0] - codepointN == 1 and mappingN[1:] == mapping0[1:]:
return True
return False
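# Illustrative trace (added comment, not part of the original script): with a
# hypothetical folding {0x1234: [0x1235], 0x1236: [0x1237], 0x1238: [0x1239]}
# and codepoint_list = [0x1234, 0x1236, 0x1238],
# is_range_compatible(folding, codepoint_list, 0, 2) returns True via the
# type (2) check: the codepoints step by 2 and each maps to codepoint + 1,
# so all three entries can be collapsed into a single range record.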
def mapping_str(list, mapping):
return ",".join("0x{:04x}".format(x) for x in mapping)
for mapping_len in range(1, 4):
folding = folding_list[mapping_len-1]
codepoint_list = list(folding)
index0 = 0
count = len(folding)
records = list()
data_records = list()
while index0 < count:
index1 = index0 + 1
while index1 < count and is_range_compatible(folding, codepoint_list, index0, index1):
index1 += 1
if index1 - index0 > 2:
# Range of codepoints
records.append("R(0x{:04x},0x{:04x})".format(codepoint_list[index0], codepoint_list[index1-1]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index1-1]]))
index0 = index1
else:
# Single codepoint
records.append("S(0x{:04x})".format(codepoint_list[index0]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
index0 += 1
sys.stdout.write("static const unsigned FOLD_MAP_{}[] = {{\n".format(mapping_len))
sys.stdout.write("\n".join(textwrap.wrap(", ".join(records), 110,
initial_indent = " ", subsequent_indent=" ")))
sys.stdout.write("\n};\n")
sys.stdout.write("static const unsigned FOLD_MAP_{}_DATA[] = {{\n".format(mapping_len))
sys.stdout.write("\n".join(textwrap.wrap(", ".join(data_records), 110,
initial_indent = " ", subsequent_indent=" ")))
sys.stdout.write("\n};\n")
|
[
"os.path.realpath",
"sys.stdout.write"
] |
[((91, 117), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (107, 117), False, 'import os\n'), ((4045, 4071), 'sys.stdout.write', 'sys.stdout.write', (['"""\n};\n"""'], {}), "('\\n};\\n')\n", (4061, 4071), False, 'import sys\n'), ((4321, 4347), 'sys.stdout.write', 'sys.stdout.write', (['"""\n};\n"""'], {}), "('\\n};\\n')\n", (4337, 4347), False, 'import sys\n')]
|
from controller import Robot
from controller import Motor
from controller import PositionSensor
from controller import Robot, DistanceSensor, GPS, Camera, Receiver, Emitter
import cv2
import numpy as np
import math
import time
robot = Robot()
timeStep = 32
tile_size = 0.12
speed = 6.28
media_baldoza = 0.06
estado = 1
start = 0
global r
global g
global b
r = 0
g = 0
b = 0
# start = robot.getTime()
# Camera initialization
camera = robot.getDevice("camera3")
camera.enable(timeStep)
# Colour sensor initialization
colour_sensor = robot.getDevice("colour_sensor")
colour_sensor.enable(timeStep)
# Distance sensor initialization
distancia_sensor1 = robot.getDevice("distance sensor1")
distancia_sensor1.enable(timeStep)
# Motor initialization
ruedaIzquierda = robot.getDevice("wheel1 motor")
ruedaDerecha = robot.getDevice("wheel2 motor")
ruedaIzquierda.setPosition(float('inf'))
ruedaDerecha.setPosition(float('inf'))
rIzq_encoder = ruedaIzquierda.getPositionSensor()
rDer_encoder = ruedaDerecha.getPositionSensor()
rIzq_encoder.enable(timeStep)
rDer_encoder.enable(timeStep)
# Functions
def leer_sensores():
global r
global g
global b
# Color sensor
image = colour_sensor.getImage()
r = colour_sensor.imageGetRed(image, 1, 0, 0)
g = colour_sensor.imageGetGreen(image, 1, 0, 0)
b = colour_sensor.imageGetBlue(image, 1, 0, 0)
    # blue: r=65 g=65 b=252
    # red: r=252 g=65 b=65
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
"""
    # Camera
image = camera.getImage()
imagen = np.frombuffer(image, np.uint8).reshape((camera.getHeight(), camera.getWidth(), 4))
frame = cv2.cvtColor(imagen, cv2.COLOR_BGRA2BGR)
cv2.imshow("frame", frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Grayscale
cv2.imshow("grayScale", frame)
cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY) # Threshold
cv2.imshow("thresh", frame)
cv2.waitKey(1)
    # Distance sensor
print("Distancia: " + str(distancia_sensor1.getValue()))
"""
def avanzar(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(vel)
def retroceder(vel):
ruedaIzquierda.setVelocity(-vel)
ruedaDerecha.setVelocity(-vel)
def girar_der(vel):
ruedaIzquierda.setVelocity(-vel)
ruedaDerecha.setVelocity(vel)
def girar_izq(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(-vel)
gyro = robot.getDevice("gyro")
gyro.enable(timeStep)
def rotar(angulo):
global angulo_actual
global tiempo_anterior
    # start rotation
if angulo > 0:
girar_der(0.5)
else:
girar_izq(0.5)
    # Keep turning while the requested angle has not yet been reached
if (abs(abs(angulo) - angulo_actual) > 1):
tiempo_actual = robot.getTime()
# print("Inicio rotacion angulo", angulo, "Angulo actual:",angulo_actual)
tiempo_transcurrido = tiempo_actual - \
            tiempo_anterior # time elapsed in each timestep
        # rad/sec * msec * 1000
radsIntimestep = abs(gyro.getValues()[1]) * tiempo_transcurrido
degsIntimestep = radsIntimestep * 180 / math.pi
# print("rads: " + str(radsIntimestep) +
# " | degs: " + str(degsIntimestep))
angulo_actual += degsIntimestep
        # If it goes past 360 degrees, adjust the rotation to start again from 0 degrees
angulo_actual = angulo_actual % 360
        # If it is below 0 degrees, subtract that value from 360
if angulo_actual < 0:
angulo_actual += 360
tiempo_anterior = tiempo_actual
# print("Angulo actual:", angulo_actual)
return False
#print("Rotacion finalizada.")
angulo_actual = 0
return True
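# Illustrative note (added comment; the numbers are assumed, not from the
# original controller): rotar() tracks the turned angle by integrating the
# gyro reading over each timestep. For example, a reading of 1.57 rad/s held
# for a 0.032 s step contributes about 1.57 * 0.032 * 180 / pi ~= 2.9 degrees
# to angulo_actual.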
def delay(ms):
initTime = robot.getTime() # Store starting time (in seconds)
while robot.step(timeStep) != -1:
print("delay")
if (robot.getTime() - initTime) * 1000.0 > ms: # If time elapsed (converted into ms) is greater than value passed in
avanzar(0)
break
def rotar_enclavado(angulo):
while robot.step(timeStep) != -1:
leer_sensores()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if rotar(angulo) == True: # If time elapsed (converted into ms) is greater than value passed in
avanzar(0)
break
def avance(tipo_avance):
start = rDer_encoder.getValue()
velocidad = 0
avance = 0
if tipo_avance == "medio":
velocidad = 3
avance = 2.9
elif tipo_avance == "largo":
avance = 5.9
velocidad = 5.96
elif tipo_avance == "esquina":
avance = 4.1
velocidad = 6.28
while robot.step(timeStep) != -1:
avanzar(velocidad)
leer_sensores()
tipo_pizza()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if rDer_encoder.getValue() >= start + avance:
avanzar(0)
break
def retroceso(tipo_retroceso):
start = rDer_encoder.getValue()
velocidad = 0
retroceso = 0
if tipo_retroceso == "medio":
velocidad = 6.28
retroceso = 2.9
elif tipo_retroceso == "largo":
retroceso = 5.9
velocidad = 5.96
elif tipo_retroceso == "esquina":
retroceso = 4.1
velocidad = 6.28
elif tipo_retroceso == "poquito":
retroceso = 1.9
velocidad = 6.28
while robot.step(timeStep) != -1:
retroceder(velocidad)
leer_sensores()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if start - retroceso >= rDer_encoder.getValue():
avanzar(0)
break
def tipo_pizza():
#print("valores(1): r:" + str(r) + " , g:" + str(g) + " , b:" + str(b))
if 255 >= r >= 240 and 60 <= b <= 75 and 60 <= g <= 75:
print("(Red)pasaje zona 3 a 1")
elif 150 >= r >= 100 and 210 <= b <= 230 and 60 <= g <= 75:
print("(Vaiolet)pasaje zona 2 a 3")
elif 60 <= r <= 75 and 255 >= b >= 245 and 60 <= g <= 75:
print("(Blue)pasaje zona 1 a 2")
elif 200 <= r <= 220 and 110 >= b >= 100 and 175 <= g <= 180:
print("Entered swamp")
return "swamp"
elif 250 >= r >= 230 and 250 >= b >= 235 and 250 >= g >= 235:
print("Found Checkpoint")
elif r == 233 and b == 233 and g == 233:
print("Azulejo normal")
elif 30 <= r <= 50 :
print("un agujero negro we")
retroceso("medio")
rotar_enclavado(90)
else:
return "prueba"
angulo_actual = 0
tiempo_anterior = robot.getTime()
contador = 0
while robot.step(timeStep) != -1:
avance("medio")
|
[
"controller.Robot"
] |
[((236, 243), 'controller.Robot', 'Robot', ([], {}), '()\n', (241, 243), False, 'from controller import Robot, DistanceSensor, GPS, Camera, Receiver, Emitter\n')]
|
# Generated by Django 3.0.7 on 2020-11-25 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('drip', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='querysetrule',
name='rule_type',
field=models.CharField(choices=[('or', 'Or'), ('and', 'And')], default='and', max_length=3),
),
]
|
[
"django.db.models.CharField"
] |
[((329, 418), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('or', 'Or'), ('and', 'And')]", 'default': '"""and"""', 'max_length': '(3)'}), "(choices=[('or', 'Or'), ('and', 'And')], default='and',\n max_length=3)\n", (345, 418), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
"""User functions to streamline working with selected pymer4 LMER fit
attributes from lme4::lmer and lmerTest for ``fitgrid.lmer`` grids.
"""
import functools
import re
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
import fitgrid
from fitgrid.fitgrid import LMERFitGrid
def get_lmer_dfbetas(epochs, factor, **kwargs):
r"""Fit lmers leaving out factor levels one by one, compute DBETAS.
Parameters
----------
epochs : Epochs
Epochs object
factor : str
column name of the factor of interest
**kwargs
keyword arguments to pass on to ``fitgrid.lmer``, like ``RHS``
Returns
-------
dfbetas : pandas.DataFrame
dataframe containing DFBETAS values
Examples
--------
Example calculation showing how to pass in model fitting parameters::
dfbetas = fitgrid.utils.lmer.get_lmer_dfbetas(
epochs=epochs,
factor='subject_id',
            RHS='x + (x|a)'
)
Notes
-----
DFBETAS is computed according to the following formula [NieGroPel2012]_:
.. math::
DFBETAS_{ij} = \frac{\hat{\gamma}_i - \hat{\gamma}_{i(-j)}}{se\left(\hat{\gamma}_{i(-j)}\right)}
for parameter :math:`i` and level :math:`j` of ``factor``.
"""
# get the factor levels
table = epochs.table.reset_index().set_index(
[epochs.epoch_id, epochs.time]
)
levels = table[factor].unique()
# produce epochs tables with each level left out
looo_epochs = (
fitgrid.epochs_from_dataframe(
table[table[factor] != level],
time=epochs.time,
epoch_id=epochs.epoch_id,
channels=epochs.channels,
)
for level in levels
)
# fit lmer on these epochs
fitter = functools.partial(fitgrid.lmer, **kwargs)
grids = map(fitter, looo_epochs)
coefs = (grid.coefs for grid in grids)
# get coefficient estimates and se from leave one out fits
looo_coefs = pd.concat(coefs, keys=levels, axis=1)
looo_estimates = looo_coefs.loc[pd.IndexSlice[:, :, 'Estimate'], :]
looo_se = looo_coefs.loc[pd.IndexSlice[:, :, 'SE'], :]
# get coefficient estimates from regular fit (all levels included)
all_levels_coefs = fitgrid.lmer(epochs, **kwargs).coefs
all_levels_estimates = all_levels_coefs.loc[
pd.IndexSlice[:, :, 'Estimate'], :
]
# drop outer level of index for convenience
for df in (looo_estimates, looo_se, all_levels_estimates):
df.index = df.index.droplevel(level=-1)
# (all_levels_estimate - level_excluded_estimate) / level_excluded_se
dfbetas = all_levels_estimates.sub(looo_estimates, level=1).div(
looo_se, level=1
)
return dfbetas.stack(level=0)
def get_lmer_warnings(lmer_grid):
"""grid the LMERFitGrid lme4::lmer4 warnings by type
lmer warnings are a mishmash of characters, punctuation, and digits, some with
numerical values specific to the message, for instance,
| Model failed to converge with max|grad| = 0.00222262 (tol = 0.002, component 1)
| unable to evaluate scaled gradient
| boundary (singular) fit: see ?isSingular
| np.nan
The warning strings are returned as-is except for stripping
leading and trailing whitespace and the "= N.NNNNNNNN" portion of the
max \|grad\| convergence failure.
Parameters
----------
lmer_grid : fitgrid.LMERFitGrid
as returned by ``fitgrid.lmer()``, shape = time x channel
Returns
-------
warning_grids : dict
A dictionary, the keys are lmer warning strings, each value
is a `pandas.DataFrame` indicator grid where grid.loc[time, channel] == 1 if the
lmer warning == key, otherwise 0.
"""
if not isinstance(lmer_grid, LMERFitGrid):
msg = (
"get_lmer_warnings() must be called on an "
f"LMERFitGrid not {type(lmer_grid)}"
)
raise ValueError(msg)
# In pymer4 0.7.1+ and lme4::lmer 0.22+ warnings come back from
# lme4::lmer via pymer4 as list of strings and each LMERFitgrid
# cell may have a list of 0, 1, 2, ... ? warnings. This means
# LMERFitGrid.warnings time index may have missing time stamps (= no
# warnings), a single time stamp (one warning), or duplicate time
# stamps (> 1 warning) and np.nan at channels where there is no
# warning at that timestamp.
# strip reported decimal values so max|grad| convergence failures are one kind
tidy_strings = lmer_grid.warnings.applymap(
lambda x: re.sub(
r"max\|grad\|\s+=\s+\d+\.\d+\s+", "max|grad| ", x
).strip()
if isinstance(x, str)
else x # no warning == np.nan
).rename_axis([lmer_grid.time, "wdx", "_empty"], axis=0)
# the number and types of warning generally vary by time and/or channel
warning_kinds = (
pd.Series(tidy_strings.to_numpy().flatten()).dropna().unique()
)
# collect messy gappy, multiple warnings as a dict of key==warning,
# value==tidy time x channel indicator grid (0, 1)
warning_grids = {}
assert lmer_grid._grid.shape == lmer_grid.has_warning.shape
for warning_kind in warning_kinds:
# empty grid w/ correct shape, row index and columns
warning_grid = pd.DataFrame(
np.zeros(lmer_grid._grid.shape, dtype=int),
index=lmer_grid._grid.index.copy(),
columns=lmer_grid._grid.columns.copy(),
)
# select rows w/ at least one non-na
warning_rows = tidy_strings[tidy_strings == warning_kind].dropna(
axis=0, how="all"
)
assert warning_rows.index.names[0] == lmer_grid._grid.index.name
assert all(
warning_rows.index.get_level_values(0)
== warning_rows.index.get_level_values(0).unique()
)
for rdx, row in warning_rows.iterrows():
warning_grid.loc[rdx[0], :] = (row == warning_kind).astype(int)
assert all(warning_grid.index == lmer_grid._grid.index)
assert all(warning_grid.columns == lmer_grid._grid.columns)
warning_grids[warning_kind] = warning_grid
return warning_grids
def plot_lmer_warnings(lmer_grid, which="each", verbose=True):
"""Raster plot lme4::lmer warning grids
Parameters
----------
lmer_grid : fitgrid.LMERFitGrid
as returned by ``fitgrid.lmer()``, shape = time x channel
which : {"each", "all", or list of str}
select the types of warnings to plot. `each` (default) plots
each type of warning separately. `all` plots one grid showing
where any type of warning occurred. A list of strings searches
the lmer warnings and plots those that match.
verbose : bool, default=True
If `True` warn of failed matches for warnings keywords.
Examples
--------
default, plot each warning grid separately
>>> plot_lmer_warnings(lmer_grid)
one plot shows everywhere there is a warning
>>> plot_lmer_warnings(lmer_grid, which="all")
plot just warnings that match these strings
>>> plot_lmer_warnings(lmer_grid, which=["converge", "singular"])
"""
def _plot_warnings(warning, warning_grid):
# masked array non-values are transparent in pcolormesh
_, axi = plt.subplots(figsize=(12, len(warning_grid.columns) / 2))
axi.set_title(warning)
ylabels = warning_grid.columns
axi.yaxis.set_major_locator(
mpl.ticker.FixedLocator(np.arange(len(ylabels)))
)
axi.yaxis.set_major_formatter(mpl.ticker.FixedFormatter(ylabels))
axi.pcolormesh(
warning_grid.index,
np.arange(len(ylabels)),
np.ma.masked_not_equal(warning_grid.T.to_numpy(), 1),
shading="nearest",
cmap=mpl.colors.ListedColormap(['red']),
)
# validate kwarg
if not (
isinstance(which, str)
or (
isinstance(which, list)
and all((isinstance(wrn, str) for wrn in which))
)
):
raise ValueError(
"The value for which=value must be 'any', 'each', a warning "
f"string pattern to match or list of them, not this: {which}"
)
warning_grids = get_lmer_warnings(lmer_grid)
warning_grids["all"] = lmer_grid.has_warning.astype(int)
keys = None
if which == "all":
keys = ["all"]
elif which == "each":
keys = list(warning_grids.keys())
else:
# lookup matching patterns var so as to not step on original kwarg
patterns = [which] if isinstance(which, str) else which
keys = []
for pattern in patterns:
matches = [key for key in warning_grids if pattern in key]
keys += matches # may be []
if verbose and not matches:
warnings.warn(f"warning pattern '{pattern}' not found")
assert isinstance(keys, list), f"this should be type list: {type(keys)}"
for key in keys:
if verbose:
print(f"{key}")
_plot_warnings(key, warning_grids[key])
if verbose and not keys:
warnings.warn(f"no model warnings match {which}")
|
[
"fitgrid.lmer",
"matplotlib.ticker.FixedFormatter",
"matplotlib.colors.ListedColormap",
"numpy.zeros",
"functools.partial",
"fitgrid.epochs_from_dataframe",
"warnings.warn",
"re.sub",
"pandas.concat"
] |
[((1866, 1907), 'functools.partial', 'functools.partial', (['fitgrid.lmer'], {}), '(fitgrid.lmer, **kwargs)\n', (1883, 1907), False, 'import functools\n'), ((2069, 2106), 'pandas.concat', 'pd.concat', (['coefs'], {'keys': 'levels', 'axis': '(1)'}), '(coefs, keys=levels, axis=1)\n', (2078, 2106), True, 'import pandas as pd\n'), ((1597, 1732), 'fitgrid.epochs_from_dataframe', 'fitgrid.epochs_from_dataframe', (['table[table[factor] != level]'], {'time': 'epochs.time', 'epoch_id': 'epochs.epoch_id', 'channels': 'epochs.channels'}), '(table[table[factor] != level], time=epochs.\n time, epoch_id=epochs.epoch_id, channels=epochs.channels)\n', (1626, 1732), False, 'import fitgrid\n'), ((2333, 2363), 'fitgrid.lmer', 'fitgrid.lmer', (['epochs'], {}), '(epochs, **kwargs)\n', (2345, 2363), False, 'import fitgrid\n'), ((9241, 9290), 'warnings.warn', 'warnings.warn', (['f"""no model warnings match {which}"""'], {}), "(f'no model warnings match {which}')\n", (9254, 9290), False, 'import warnings\n'), ((5412, 5454), 'numpy.zeros', 'np.zeros', (['lmer_grid._grid.shape'], {'dtype': 'int'}), '(lmer_grid._grid.shape, dtype=int)\n', (5420, 5454), True, 'import numpy as np\n'), ((7675, 7709), 'matplotlib.ticker.FixedFormatter', 'mpl.ticker.FixedFormatter', (['ylabels'], {}), '(ylabels)\n', (7700, 7709), True, 'import matplotlib as mpl\n'), ((7918, 7952), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (["['red']"], {}), "(['red'])\n", (7943, 7952), True, 'import matplotlib as mpl\n'), ((8952, 9007), 'warnings.warn', 'warnings.warn', (['f"""warning pattern \'{pattern}\' not found"""'], {}), '(f"warning pattern \'{pattern}\' not found")\n', (8965, 9007), False, 'import warnings\n'), ((4653, 4717), 're.sub', 're.sub', (['"""max\\\\|grad\\\\|\\\\s+=\\\\s+\\\\d+\\\\.\\\\d+\\\\s+"""', '"""max|grad| """', 'x'], {}), "('max\\\\|grad\\\\|\\\\s+=\\\\s+\\\\d+\\\\.\\\\d+\\\\s+', 'max|grad| ', x)\n", (4659, 4717), False, 'import re\n')]
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import random
import pickle
import json
import fractions
import math
import subprocess
from logging import getLogger
from functools import reduce
from .dataset import DataSet
from .data_sampler import SequentialSampler, ShuffledSampler, DistributedDataSampler
from .tokenization import FullTokenizer
from .squad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions, InputFeatures
logger = getLogger(__name__)
def generate_random_features(sequence_length, vocab_length, batch_size):
features = []
for i in range(batch_size):
features.append(InputFeatures(
i,
None,
None,
None,
None,
None,
np.random.randint(0, vocab_length, size=sequence_length),
None,
np.random.randint(0, 2, size=sequence_length),
0,
None,
None,
np.random.randint(0, sequence_length, size=1),
np.random.randint(0, sequence_length, size=1),
None,
np.random.randint(0, sequence_length+1, size=1)
))
return features
class SquadDataLoader(object):
def __init__(self,
features,
sequence_length=None,
batch_size=1,
dtype=np.int32,
sampler=None):
self.features = features
self.batch_size = batch_size
self.dtype = dtype
self.sequence_length = sequence_length
self.sampler = sampler
if sampler is None:
self.sampler = SequentialSampler(features)
self.num_batches = len(self.sampler)//self.batch_size
def __len__(self):
return self.num_batches
def __iter__(self):
self.feature_iterator = iter([self.features[idx] for idx in self.sampler])
return self
def __next__(self):
items = [next(self.feature_iterator) for _ in range(self.batch_size)]
indicies = []
positions = []
segments = []
sequence_mask_idx = []
start_pos = []
end_pos = []
uid = []
for item in items:
indicies.append(item.input_ids)
padding_max = self.sequence_length if self.sequence_length is not None else len(item.input_ids)
padding_length = len(item.input_ids) - item.padding_start_index
position_padding = np.full(padding_length, padding_max)
position_ids = np.arange(0, item.padding_start_index)
positions.append(np.concatenate((position_ids, position_padding)).astype(np.int32))
segments.append(item.segment_ids)
sequence_mask_idx.append(item.padding_start_index)
start_pos.append(item.start_position)
end_pos.append(item.end_position)
uid.append(item.unique_id)
# Including impossible samples during training is under investigation. T12851
# if item.is_impossible:
# logger.warning("Impossible sample exists in the dataset. "
# f"start pos: {item.start_position}, end pos: {item.end_position}")
inputs = []
for i in [indicies, positions, segments, sequence_mask_idx, start_pos, end_pos, uid]:
inputs.append(np.stack(i))
return inputs
class BertDataTransform(object):
'''
Masks the indices that are larger than the vocab_length
'''
def __init__(self, dataloader, vocab_length, sequence_length, embedding_dict, positional_dict, merge_both_embeddings, is_training=True):
self.dataloader = dataloader
self.vocab_length = vocab_length
self.sequence_length = sequence_length
self.is_training = is_training
self.embedding_dict = embedding_dict
self.positional_dict = positional_dict
self.merge_both_embeddings = merge_both_embeddings
def __len__(self):
return len(self.dataloader)
def __iter__(self):
self.dataloader_iterator = iter(self.dataloader)
return self
def __next__(self):
items = next(self.dataloader_iterator)
# Specific BERT Post Processing. TODO: Find a better place for this processing
# The vocab_length may be smaller than the original vocab... In this case with the custom_op
# Out of Bounds indicies over a certain threshold will cause numerical issues.
# 100 is unknown token [UNK]
# 0 in the label is padding
OOB = items[0] >= self.vocab_length
items[0][OOB] = 100
# Force use of uint32 for all inputs.
for i in range(len(items)):
if self.is_training or i < 4:
items[i] = items[i].astype(np.uint32)
if self.embedding_dict is not None:
items[0] = np.take(self.embedding_dict, items[0], 0)
if self.positional_dict is not None:
positional_expanded = np.take(self.positional_dict, items[1], 0)
if self.merge_both_embeddings:
items[0] += positional_expanded
else:
items[1] = positional_expanded
return items
def load_or_cache_features(input_file,
vocab_file,
sequence_length,
is_training=True,
cache_file=None,
overwrite_cache=False,
do_lower_case=False):
if cache_file is None:
cache_file = input_file + f".{sequence_length}.cache"
if os.path.exists(cache_file) and not overwrite_cache:
examples = None
logger.info(f"Loading Cache {cache_file}")
with open(cache_file, "rb") as f:
features = pickle.load(f)
else:
logger.info("Reading Examples")
examples = read_squad_examples(input_file=input_file,
is_training=is_training,
version_2_with_negative=False)
# google-research/bert uses sequence_length 384 with doc_stride 128
# TODO: Find a good value for the doc_stride with sequence_length <384
doc_stride = 128
if sequence_length < 384:
doc_stride = 64
logger.info("Converting to Features")
features = convert_examples_to_features(examples=examples,
tokenizer=FullTokenizer(vocab_file, do_lower_case=do_lower_case),
max_seq_length=sequence_length,
doc_stride=doc_stride,
max_query_length=64,
is_training=is_training)
logger.info(f"Saving Cache {cache_file}")
with open(cache_file, "wb") as f:
pickle.dump(features, f)
return features, examples
class SquadDataSet(DataSet):
def __init__(self,
features,
examples,
input_file,
is_training,
output_dir=None,
evaluate_script=None,
do_lower_case=False,
n_extra=0,
**kwargs):
super().__init__(**kwargs)
self.features = features
self.examples = examples
self.is_training = is_training
self.input_file = input_file
self.output_dir = output_dir
self.do_lower_case = do_lower_case
if not self.is_training and self.output_dir is not None:
os.makedirs(self.output_dir, exist_ok=True)
# If examples is None, features was loaded from the cache
# So the examples need to be recreated.
if self.examples is None:
self.examples = read_squad_examples(input_file=self.input_file,
is_training=self.is_training,
version_2_with_negative=False)
self.results = []
self.evaluate_script = evaluate_script
self.n_extra = n_extra
def add_results(self, data, logits):
# Results will be batched. Flatten to individual results
start_logits, end_logits = [
logit.reshape(-1, logit.shape[-1]).tolist()
for logit in logits]
for i, unique_id in enumerate(data["uid"]):
self.results.append(RawResult(
unique_id=unique_id,
start_logits=start_logits[i],
end_logits=end_logits[i]
))
def write_predictions(self, epoch=None):
if self.is_training:
raise RuntimeError("Predictions cannot be written for training datasets")
if self.output_dir is None:
raise RuntimeError("Predictions cannot be written when output_dir is None")
suffix = f"_{epoch}" if epoch is not None else ""
predictions_file = os.path.join(self.output_dir, f"predictions{suffix}.json")
nbest_file = os.path.join(self.output_dir, f"nbest_predictions{suffix}.json")
null_log_odds_file = os.path.join(self.output_dir, f"null_odds{suffix}.json")
self.results = self.results[:len(self.results) - self.n_extra]
write_predictions(self.examples,
self.features,
self.results,
20, 30,
self.do_lower_case,
predictions_file,
nbest_file,
null_log_odds_file,
True,
False, 0)
if self.evaluate_script is not None:
evaluation = subprocess.check_output(["python", self.evaluate_script, self.input_file, predictions_file])
evaluation = json.loads(evaluation)
f1 = evaluation["f1"]
exact_match = evaluation["exact_match"]
status_string = f"F1 Score: {f1} | Exact Match: {exact_match}"
if epoch is not None:
status_string = f"Epoch: {epoch:3}{args.epochs - 1} | " + status_string
logger.info(status_string)
def get_bert_dataset(tensor_shapes,
input_file,
output_dir,
sequence_length,
vocab_file,
vocab_length,
batch_size,
batches_per_step,
embedding_dict,
positional_dict,
merge_both_embeddings=False,
replication_factor=1,
accumulation_factor=1,
shuffle=True,
is_training=True,
overwrite_cache=False,
no_drop_remainder=False,
evaluate_script=None,
generated_data=False,
do_lower_case=False,
max_pipeline_stage=1,
seed=0,
mpi_size=1,
mpi_rank=0,
is_distributed=False):
samples_per_step = batch_size * batches_per_step * \
replication_factor * accumulation_factor
div_factor = batch_size * replication_factor * accumulation_factor * batches_per_step
pad = 0
if generated_data:
features = generate_random_features(
sequence_length, vocab_length, samples_per_step)
examples = None
output_dir = None
logger.info("Generating random dataset")
else:
features, examples = load_or_cache_features(
input_file,
vocab_file,
sequence_length,
is_training,
overwrite_cache=overwrite_cache,
do_lower_case=do_lower_case)
if no_drop_remainder and not generated_data:
# dataset will be padded to be divisible by batch-size and samples-per-step
pad = int(np.ceil(len(features)/div_factor)) * div_factor - len(features)
if is_distributed:
sampler = DistributedDataSampler(
features, seed, shuffle,
mpi_size, mpi_rank, padding=False, padding_sub=pad, div_factor=div_factor)
pad = sampler.get_subpadding_size()
elif shuffle:
sampler = ShuffledSampler(features, seed, pad)
else:
sampler = SequentialSampler(features, pad)
if no_drop_remainder and not generated_data:
logger.info(f"no_drop_remainder: Dataset padded by {pad} samples")
dl = SquadDataLoader(
features,
sequence_length=sequence_length,
batch_size=samples_per_step,
sampler=sampler
)
bert_ds = BertDataTransform(
dl,
vocab_length,
sequence_length,
embedding_dict,
positional_dict,
merge_both_embeddings,
is_training=is_training)
if not is_training:
# Add uid to the data dictionary so evaluation script can be run
tensor_shapes += [
("start", None),
("end", None),
("uid", None)]
ds = SquadDataSet(
features,
examples,
input_file,
is_training,
output_dir,
evaluate_script,
do_lower_case=do_lower_case,
n_extra=pad,
loader=bert_ds,
tensor_shapes=tensor_shapes,
batches_per_step=batches_per_step,
replication_factor=replication_factor,
accumulation_factor=accumulation_factor)
return ds
|
[
"logging.getLogger",
"os.path.exists",
"subprocess.check_output",
"json.loads",
"pickle.dump",
"os.makedirs",
"os.path.join",
"pickle.load",
"numpy.take",
"numpy.stack",
"numpy.random.randint",
"numpy.concatenate",
"numpy.full",
"numpy.arange"
] |
[((1064, 1083), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (1073, 1083), False, 'from logging import getLogger\n'), ((6190, 6216), 'os.path.exists', 'os.path.exists', (['cache_file'], {}), '(cache_file)\n', (6204, 6216), False, 'import os\n'), ((9633, 9691), 'os.path.join', 'os.path.join', (['self.output_dir', 'f"""predictions{suffix}.json"""'], {}), "(self.output_dir, f'predictions{suffix}.json')\n", (9645, 9691), False, 'import os\n'), ((9713, 9777), 'os.path.join', 'os.path.join', (['self.output_dir', 'f"""nbest_predictions{suffix}.json"""'], {}), "(self.output_dir, f'nbest_predictions{suffix}.json')\n", (9725, 9777), False, 'import os\n'), ((9807, 9863), 'os.path.join', 'os.path.join', (['self.output_dir', 'f"""null_odds{suffix}.json"""'], {}), "(self.output_dir, f'null_odds{suffix}.json')\n", (9819, 9863), False, 'import os\n'), ((3051, 3087), 'numpy.full', 'np.full', (['padding_length', 'padding_max'], {}), '(padding_length, padding_max)\n', (3058, 3087), True, 'import numpy as np\n'), ((3115, 3153), 'numpy.arange', 'np.arange', (['(0)', 'item.padding_start_index'], {}), '(0, item.padding_start_index)\n', (3124, 3153), True, 'import numpy as np\n'), ((5440, 5481), 'numpy.take', 'np.take', (['self.embedding_dict', 'items[0]', '(0)'], {}), '(self.embedding_dict, items[0], 0)\n', (5447, 5481), True, 'import numpy as np\n'), ((5561, 5603), 'numpy.take', 'np.take', (['self.positional_dict', 'items[1]', '(0)'], {}), '(self.positional_dict, items[1], 0)\n', (5568, 5603), True, 'import numpy as np\n'), ((6382, 6396), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6393, 6396), False, 'import pickle\n'), ((7512, 7536), 'pickle.dump', 'pickle.dump', (['features', 'f'], {}), '(features, f)\n', (7523, 7536), False, 'import pickle\n'), ((8240, 8283), 'os.makedirs', 'os.makedirs', (['self.output_dir'], {'exist_ok': '(True)'}), '(self.output_dir, exist_ok=True)\n', (8251, 8283), False, 'import os\n'), ((10404, 10500), 'subprocess.check_output', 'subprocess.check_output', (["['python', self.evaluate_script, self.input_file, predictions_file]"], {}), "(['python', self.evaluate_script, self.input_file,\n predictions_file])\n", (10427, 10500), False, 'import subprocess\n'), ((10522, 10544), 'json.loads', 'json.loads', (['evaluation'], {}), '(evaluation)\n', (10532, 10544), False, 'import json\n'), ((1365, 1421), 'numpy.random.randint', 'np.random.randint', (['(0)', 'vocab_length'], {'size': 'sequence_length'}), '(0, vocab_length, size=sequence_length)\n', (1382, 1421), True, 'import numpy as np\n'), ((1453, 1498), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': 'sequence_length'}), '(0, 2, size=sequence_length)\n', (1470, 1498), True, 'import numpy as np\n'), ((1563, 1608), 'numpy.random.randint', 'np.random.randint', (['(0)', 'sequence_length'], {'size': '(1)'}), '(0, sequence_length, size=1)\n', (1580, 1608), True, 'import numpy as np\n'), ((1622, 1667), 'numpy.random.randint', 'np.random.randint', (['(0)', 'sequence_length'], {'size': '(1)'}), '(0, sequence_length, size=1)\n', (1639, 1667), True, 'import numpy as np\n'), ((1699, 1748), 'numpy.random.randint', 'np.random.randint', (['(0)', '(sequence_length + 1)'], {'size': '(1)'}), '(0, sequence_length + 1, size=1)\n', (1716, 1748), True, 'import numpy as np\n'), ((3936, 3947), 'numpy.stack', 'np.stack', (['i'], {}), '(i)\n', (3944, 3947), True, 'import numpy as np\n'), ((3183, 3231), 'numpy.concatenate', 'np.concatenate', (['(position_ids, position_padding)'], {}), '((position_ids, position_padding))\n', (3197, 3231), True, 'import numpy as np\n')]
|
import json
import numpy as np
import pdb
import torch
from ray_utils import get_rays, get_ray_directions, get_ndc_rays
BOX_OFFSETS = torch.tensor([[[i,j,k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]],
device='cuda')
SQR_OFFSETS = torch.tensor([[[i,j] for i in [0, 1] for j in [0, 1] ]], device='cuda')
def hash(coords, log2_hashmap_size):
'''
coords: 3D coordinates. B x 3
log2T: logarithm of T w.r.t 2
'''
x, y, z = coords[..., 0], coords[..., 1], coords[..., 2]
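    # XOR the coordinates scaled by large primes, then mask down to the 2**log2_hashmap_size table entries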
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
#return ((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
def hash2d(coords, log2_hashmap_size):
'''
coords: 2D coordinates. B x 3
log2T: logarithm of T w.r.t 2
'''
x, y = coords[..., 0], coords[..., 1]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663)
def xy2index(xy,resolution):
return xy[...,0]+xy[...,1]*resolution
def get_bbox3d_for_blenderobj(camera_transforms, H, W, near=2.0, far=6.0):
camera_angle_x = float(camera_transforms['camera_angle_x'])
focal = 0.5*W/np.tan(0.5 * camera_angle_x)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
for frame in camera_transforms["frames"]:
c2w = torch.FloatTensor(frame["transform_matrix"])
rays_o, rays_d = get_rays(directions, c2w)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([1.0,1.0,1.0]), torch.tensor(max_bound)+torch.tensor([1.0,1.0,1.0]))
def get_bbox3d_for_llff(poses, hwf, near=0.0, far=1.0):
H, W, focal = hwf
H, W = int(H), int(W)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
poses = torch.FloatTensor(poses)
for pose in poses:
rays_o, rays_d = get_rays(directions, pose)
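        # convert rays to normalized device coordinates so the bbox is computed in NDC space (forward-facing LLFF scenes)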
rays_o, rays_d = get_ndc_rays(H, W, focal, 1.0, rays_o, rays_d)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([0.1,0.1,0.0001]), torch.tensor(max_bound)+torch.tensor([0.1,0.1,0.0001]))
def get_voxel_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int()
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS
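    # (B, 1, 3) + (1, 8, 3) broadcasts to the indices of all 8 cube corners; hashed below to (B, 8)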
hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices_old(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
#pdb.set_trace()
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
#hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xy = xy2index(voxel_indices_xy,resolution) #(B, 4)
hashed_voxel_indices_xz = xy2index(voxel_indices_xz,resolution) #(B, 4)
hashed_voxel_indices_yz = xy2index(voxel_indices_yz,resolution) #(B, 4)
#print(hashed_voxel_indices_yz.shape)
#pdb.set_trace()
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
if __name__=="__main__":
with open("data/nerf_synthetic/chair/transforms_train.json", "r") as f:
camera_transforms = json.load(f)
bounding_box = get_bbox3d_for_blenderobj(camera_transforms, 800, 800)
|
[
"numpy.tan",
"torch.all",
"ray_utils.get_rays",
"torch.floor",
"ray_utils.get_ndc_rays",
"torch.tensor",
"pdb.set_trace",
"json.load",
"ray_utils.get_ray_directions",
"torch.FloatTensor",
"torch.clamp"
] |
[((137, 231), 'torch.tensor', 'torch.tensor', (['[[[i, j, k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]]'], {'device': '"""cuda"""'}), "([[[i, j, k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]],\n device='cuda')\n", (149, 231), False, 'import torch\n'), ((271, 342), 'torch.tensor', 'torch.tensor', (['[[[i, j] for i in [0, 1] for j in [0, 1]]]'], {'device': '"""cuda"""'}), "([[[i, j] for i in [0, 1] for j in [0, 1]]], device='cuda')\n", (283, 342), False, 'import torch\n'), ((1264, 1295), 'ray_utils.get_ray_directions', 'get_ray_directions', (['H', 'W', 'focal'], {}), '(H, W, focal)\n', (1282, 1295), False, 'from ray_utils import get_rays, get_ray_directions, get_ndc_rays\n'), ((2343, 2374), 'ray_utils.get_ray_directions', 'get_ray_directions', (['H', 'W', 'focal'], {}), '(H, W, focal)\n', (2361, 2374), False, 'from ray_utils import get_rays, get_ray_directions, get_ndc_rays\n'), ((2472, 2496), 'torch.FloatTensor', 'torch.FloatTensor', (['poses'], {}), '(poses)\n', (2489, 2496), False, 'import torch\n'), ((538, 580), 'torch.tensor', 'torch.tensor', (['((1 << log2_hashmap_size) - 1)'], {}), '((1 << log2_hashmap_size) - 1)\n', (550, 580), False, 'import torch\n'), ((876, 918), 'torch.tensor', 'torch.tensor', (['((1 << log2_hashmap_size) - 1)'], {}), '((1 << log2_hashmap_size) - 1)\n', (888, 918), False, 'import torch\n'), ((1174, 1202), 'numpy.tan', 'np.tan', (['(0.5 * camera_angle_x)'], {}), '(0.5 * camera_angle_x)\n', (1180, 1202), True, 'import numpy as np\n'), ((1442, 1486), 'torch.FloatTensor', 'torch.FloatTensor', (["frame['transform_matrix']"], {}), "(frame['transform_matrix'])\n", (1459, 1486), False, 'import torch\n'), ((1512, 1537), 'ray_utils.get_rays', 'get_rays', (['directions', 'c2w'], {}), '(directions, c2w)\n', (1520, 1537), False, 'from ray_utils import get_rays, get_ray_directions, get_ndc_rays\n'), ((2545, 2571), 'ray_utils.get_rays', 'get_rays', (['directions', 'pose'], {}), '(directions, pose)\n', (2553, 2571), False, 'from ray_utils import get_rays, get_ray_directions, get_ndc_rays\n'), ((2597, 2643), 'ray_utils.get_ndc_rays', 'get_ndc_rays', (['H', 'W', 'focal', '(1.0)', 'rays_o', 'rays_d'], {}), '(H, W, focal, 1.0, rays_o, rays_d)\n', (2609, 2643), False, 'from ray_utils import get_rays, get_ray_directions, get_ndc_rays\n'), ((3710, 3725), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3723, 3725), False, 'import pdb\n'), ((3740, 3782), 'torch.clamp', 'torch.clamp', (['xyz'], {'min': 'box_min', 'max': 'box_max'}), '(xyz, min=box_min, max=box_max)\n', (3751, 3782), False, 'import torch\n'), ((5251, 5266), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (5264, 5266), False, 'import pdb\n'), ((5281, 5323), 'torch.clamp', 'torch.clamp', (['xyz'], {'min': 'box_min', 'max': 'box_max'}), '(xyz, min=box_min, max=box_max)\n', (5292, 5323), False, 'import torch\n'), ((7637, 7652), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7650, 7652), False, 'import pdb\n'), ((7667, 7709), 'torch.clamp', 'torch.clamp', (['xyz'], {'min': 'box_min', 'max': 'box_max'}), '(xyz, min=box_min, max=box_max)\n', (7678, 7709), False, 'import torch\n'), ((9829, 9841), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9838, 9841), False, 'import json\n'), ((2066, 2089), 'torch.tensor', 'torch.tensor', (['min_bound'], {}), '(min_bound)\n', (2078, 2089), False, 'import torch\n'), ((2090, 2119), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (2102, 2119), False, 'import torch\n'), ((2119, 2142), 'torch.tensor', 'torch.tensor', (['max_bound'], 
{}), '(max_bound)\n', (2131, 2142), False, 'import torch\n'), ((2143, 2172), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (2155, 2172), False, 'import torch\n'), ((3164, 3187), 'torch.tensor', 'torch.tensor', (['min_bound'], {}), '(min_bound)\n', (3176, 3187), False, 'import torch\n'), ((3188, 3220), 'torch.tensor', 'torch.tensor', (['[0.1, 0.1, 0.0001]'], {}), '([0.1, 0.1, 0.0001])\n', (3200, 3220), False, 'import torch\n'), ((3220, 3243), 'torch.tensor', 'torch.tensor', (['max_bound'], {}), '(max_bound)\n', (3232, 3243), False, 'import torch\n'), ((3244, 3276), 'torch.tensor', 'torch.tensor', (['[0.1, 0.1, 0.0001]'], {}), '([0.1, 0.1, 0.0001])\n', (3256, 3276), False, 'import torch\n'), ((3563, 3588), 'torch.all', 'torch.all', (['(xyz <= box_max)'], {}), '(xyz <= box_max)\n', (3572, 3588), False, 'import torch\n'), ((3596, 3621), 'torch.all', 'torch.all', (['(xyz >= box_min)'], {}), '(xyz >= box_min)\n', (3605, 3621), False, 'import torch\n'), ((3856, 3896), 'torch.floor', 'torch.floor', (['((xyz - box_min) / grid_size)'], {}), '((xyz - box_min) / grid_size)\n', (3867, 3896), False, 'import torch\n'), ((4000, 4029), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4012, 4029), False, 'import torch\n'), ((5104, 5129), 'torch.all', 'torch.all', (['(xyz <= box_max)'], {}), '(xyz <= box_max)\n', (5113, 5129), False, 'import torch\n'), ((5137, 5162), 'torch.all', 'torch.all', (['(xyz >= box_min)'], {}), '(xyz >= box_min)\n', (5146, 5162), False, 'import torch\n'), ((5397, 5437), 'torch.floor', 'torch.floor', (['((xyz - box_min) / grid_size)'], {}), '((xyz - box_min) / grid_size)\n', (5408, 5437), False, 'import torch\n'), ((5567, 5596), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (5579, 5596), False, 'import torch\n'), ((7490, 7515), 'torch.all', 'torch.all', (['(xyz <= box_max)'], {}), '(xyz <= box_max)\n', (7499, 7515), False, 'import torch\n'), ((7523, 7548), 'torch.all', 'torch.all', (['(xyz >= box_min)'], {}), '(xyz >= box_min)\n', (7532, 7548), False, 'import torch\n'), ((7783, 7823), 'torch.floor', 'torch.floor', (['((xyz - box_min) / grid_size)'], {}), '((xyz - box_min) / grid_size)\n', (7794, 7823), False, 'import torch\n'), ((7953, 7982), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (7965, 7982), False, 'import torch\n')]
|
import os
import cv2
import time
import json
import random
import inspect
import argparse
import numpy as np
from tqdm import tqdm
from dataloaders import make_data_loader
from models.sync_batchnorm.replicate import patch_replication_callback
from models.vs_net import *
from utils.loss import loss_dict
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from utils import utils
import torch
from torch.autograd import Variable
import os.path as osp
from configs import *
import warnings
warnings.filterwarnings("ignore")
class Trainer(object):
def __init__(self, cfg):
self.cfg = cfg
# Define Saver
self.saver = Saver(cfg)
# Define Tensorboard Summary
self.summary = TensorboardSummary(self.cfg["log_tb_dir"])
self.summary.create_summary()
# Define Dataloader
kwargs = {"num_workers": cfg["num_workers"], "pin_memory": True}
self.train_loader, self.val_loader, self.test_loader, dset = make_data_loader(
cfg, **kwargs)
# read landmark centers
self.id2center = np.array(json.load(
open(osp.join(cfg["data_dir"], "id2centers.json")))).astype(np.float64)
self.coding_book = torch.zeros(
(len(self.id2center), cfg["seg_channel"]), dtype=torch.float32).cuda()
torch.nn.init.xavier_uniform(self.coding_book)
print("coding book size = {}".format(self.coding_book.shape))
# generate color map
unique_label = np.arange(len(self.id2center))
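        # scramble ids with large multiply/add constants so consecutive labels map to well-separated colors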
unique_label = unique_label.astype(
np.int64) * 6364136223846793005 + 1442695040888963407
color_map = np.zeros((unique_label.shape[0], 3), np.uint8)
color_map[:, 0] = np.bitwise_and(unique_label, 0xff)
color_map[:, 1] = np.bitwise_and(np.right_shift(unique_label, 4), 0xff)
color_map[:, 2] = np.bitwise_and(np.right_shift(unique_label, 8), 0xff)
self.color_map = np.array(color_map)
self.coding_book = Variable(self.coding_book, requires_grad=True)
# Define network
model = VSNet(backbone=cfg["backbone"],
seg_decoder=cfg["seg_decoder"],
vertex_decoder=cfg["vertex_decoder"],
seg_channel=cfg["seg_channel"],
vertex_channel=cfg["vertex_channel"],
output_stride=cfg["out_stride"],
sync_bn=cfg["sync_bn"])
train_params = [{"params": model.get_1x_lr_params(), "lr": cfg["lr"]},
{"params": model.get_10x_lr_params(),
"lr": cfg["lr"] * 10},
{"params": self.coding_book, "lr": cfg["lr"] * 10}
]
# Define Optimizer
if cfg["optimizer"] == "SGD":
optimizer = torch.optim.SGD(train_params, momentum=cfg["momentum"],
weight_decay=cfg["weight_decay"], nesterov=cfg["nesterov"])
elif cfg["optimizer"] == "Adam":
optimizer = torch.optim.Adam(train_params, lr=cfg["lr"],
weight_decay=cfg["weight_decay"], amsgrad=True)
else:
raise NotImplementedError
# Define Criterion
self.seg_criterion = loss_dict[cfg["seg_loss_type"]]
self.vertex_criterion = loss_dict[cfg["vertex_loss_type"]]
self.model, self.optimizer = model, optimizer
# Define Evaluator
self.evaluator = Evaluator(
self.coding_book.shape[0], cfg["vertex_channel"])
# Define lr scheduler
self.scheduler = LR_Scheduler(mode=cfg["lr_scheduler"], base_lr=cfg["lr"],
num_epochs=cfg["epochs"], iters_per_epoch=len(
self.train_loader),
lr_step=cfg["lr_step"])
self.model = torch.nn.DataParallel(self.model)
patch_replication_callback(self.model)
self.model = self.model.cuda()
# Resuming checkpoint
        self.best_pred = {"mIoU": 0.0, "Acc": 0.0, "Acc_class": 0.0,
"FWIoU": 0.0, "translation_median": 1000}
if cfg["resume"] is not None and cfg["resume"] == True:
print(os.path.isfile(cfg["resume_checkpoint"]))
if not os.path.isfile(cfg["resume_checkpoint"]):
raise RuntimeError("=> no checkpoint found at {}" .format(
cfg["resume_checkpoint"]))
checkpoint = torch.load(cfg["resume_checkpoint"])
cfg.opt["start_epoch"] = checkpoint["epoch"] - 1
self.model.module.load_state_dict(checkpoint["state_dict"])
if not cfg["ft"]:
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.best_pred = checkpoint["best_pred"]
if "coding_book" in checkpoint.keys():
assert self.coding_book.shape == checkpoint["coding_book"].shape
self.coding_book = checkpoint["coding_book"]
else:
print("Alert! coding book does not exist in the checkpoint")
print("=> loaded checkpoint {} (epoch {})"
.format(cfg["resume"], checkpoint["epoch"]))
def validation(self, epoch):
print("=================================")
print("validation")
print("=================================")
self.model.eval()
self.evaluator.reset()
tbar = tqdm(self.val_loader, desc="\r")
num_iter_val = len(self.val_loader)
test_loss = 0.0
num_images = 0
ten_count = []
five_count = []
three_count = []
one_count = []
translation_list = []
angular_list = []
reproject_list = []
test_seg_loss = 0.0
test_ver_loss = 0.0
for i, data in enumerate(tbar):
image, seg_target, vertex_target = [d.cuda() for d in data[:3]]
valid_mask = data[-1].cuda()
pose_target, camera_k_matrix, ori_img = data[3:]
seg_target = seg_target.long()
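            # supervise only pixels that carry a landmark label (> 0); this overrides the mask loaded from the batch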
valid_mask = (seg_target.detach() > 0).float()
with torch.no_grad():
seg_pred, vertex_pred, seg_pred_x4s = self.model(
image)
loss_seg = 0
if self.cfg["seg_decoder"]:
loss_seg = self.seg_criterion(seg_pred, seg_target, self.coding_book,
margin=self.cfg["seg_loss_margin"],
seg_k=self.cfg["seg_k"],
valid_mask=valid_mask)
test_seg_loss += loss_seg.item()
self.summary.add_scalar(
"val/loss_seg_iter", loss_seg.item(), i + num_iter_val * epoch)
loss_vertex = 0
if self.cfg["vertex_decoder"]:
loss_vertex = self.vertex_criterion(vertex_pred, vertex_target,
valid_mask)
test_ver_loss += loss_vertex.item()
self.summary.add_scalar(
"val/loss_vertex_iter", loss_vertex.item(), i + num_iter_val * epoch)
loss = 0
if self.cfg["seg_decoder"]:
loss += loss_seg
if self.cfg["vertex_decoder"]:
loss += loss_vertex * self.cfg["vertex_loss_ratio"]
test_loss += loss.item()
tbar.set_description("Test loss: %.9f" % (test_loss / (i + 1)))
self.summary.add_scalar(
"val/total_loss_iter", loss.item(), i + num_iter_val * epoch)
global_step = i * \
self.cfg["val_batch_size"] + image.data.shape[0]
# evaluate seg_pred
seg_target = seg_target.detach().squeeze()
if self.cfg["seg_decoder"]:
seg_pred, knn = utils.evaluate_segmentation(seg_pred_x4s,
self.coding_book, seg_target.size(), self.cfg["use_own_nn"])
else:
seg_pred = seg_target
# evaluate vertex
pt3d_filter, pt2d_filter, _ = utils.evaluate_vertex_v2(vertex_pred, seg_pred,
self.id2center, inlier_thresh=0.999,
min_mask_num=self.cfg["val_label_filter_threshsold"])
# pt3d_filter, pt2d_filter = utils.evaluate_vertex(vertex_target, seg_pred, self.id2center)
camera_k_matrix = camera_k_matrix.squeeze().numpy()
translation_distance, angular_distance, error = 1e9, 1e9, 1e9
if pt2d_filter.shape[0] > 6:
# pnp
ret, pose_pred = utils.pnp(
pt3d_filter, pt2d_filter, camera_k_matrix)
error = utils.reproject_error(
pt3d_filter, pt2d_filter, pose_pred, camera_k_matrix)
translation_distance, angular_distance = utils.cm_degree_metric(
pose_pred, pose_target)
print(translation_distance, angular_distance, error, i)
ten_count.append(translation_distance <
10 and angular_distance < 10)
five_count.append(translation_distance <
5 and angular_distance < 5)
three_count.append(translation_distance <
3 and angular_distance < 3)
one_count.append(translation_distance <
1 and angular_distance < 1)
translation_list.append(translation_distance)
angular_list.append(angular_distance)
reproject_list.append(error)
# Add batch sample into evaluator
if self.cfg["seg_decoder"]:
self.evaluator.add_seg_batch(seg_target, seg_pred)
if self.cfg["visualize_segmenation"]:
self.summary.visualize_seg_image(ori_img, seg_pred, seg_target,
epoch, i, global_step, self.color_map)
if self.cfg["vertex_decoder"]:
# evaluate vertex_pred
vertex_target, vertex_pred = vertex_target.squeeze(), vertex_pred.squeeze()
self.evaluator.add_vertex_batch(vertex_target, vertex_pred)
                # vertex acc computation
if self.cfg["visualize_voting"]:
if self.cfg["visualize_landmark"] != None and self.cfg["visualize_landmark"]:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step, pt2d_filter, True)
else:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step)
mIoU, Acc, Acc_class, FWIoU = self.summary.visualize_seg_evaluator(
self.evaluator, epoch, "val/seg/")
print("Validation:")
print("[Epoch: %d, numImages: %5d]" % (epoch, num_images))
print("Loss: %.9f" % (test_loss / num_iter_val))
self.summary.add_scalar("val/total_loss_epoch",
test_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_seg_epoch",
test_seg_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_ver_epoch",
test_ver_loss / num_iter_val, epoch)
self.summary.add_scalar("val/pnp/10cm_epoch",
np.mean(ten_count), epoch)
self.summary.add_scalar("val/pnp/5cm_epoch",
np.mean(five_count), epoch)
self.summary.add_scalar("val/pnp/3cm_epoch",
np.mean(three_count), epoch)
self.summary.add_scalar("val/pnp/1cm_epoch", np.mean(one_count), epoch)
self.summary.add_scalar(
"val/pnp/translation_median_epoch", np.median(translation_list), epoch)
self.summary.add_scalar(
"val/pnp/angular_median_epoch", np.median(angular_list), epoch)
new_pred = {"mIoU": mIoU.item(), "Acc": Acc.item(), "Acc_class": Acc_class.item(), "FWIoU": FWIoU.item(),
"10cm": np.mean(ten_count),
"5cm": np.mean(five_count), "3cm": np.mean(three_count), "1cm": np.mean(one_count),
"translation_median": np.median(translation_list), "angular_list": np.median(angular_list)}
print(new_pred)
if new_pred["translation_median"] < self.best_pred["translation_median"]:
is_best = True
self.best_pred = new_pred
self.saver.save_checkpoint({
"epoch": epoch + 1,
"state_dict": self.model.module.state_dict(),
"optimizer": self.optimizer.state_dict(),
"best_pred": self.best_pred,
"coding_book": self.coding_book
}, is_best, save_model=self.cfg["save_model"])
def main():
parser = argparse.ArgumentParser(
description="PyTorch Landmark Segmentation Training")
parser.add_argument("--dataset", type=str,
choices=["7scenes_loc", "cambridge_loc"], help="experiment config file")
parser.add_argument("--scene", type=str, default="",
help="experiment scene")
parser.add_argument("--gpu-id", type=str, default="",
help="experiment gpu id")
parser.add_argument("--use-aug", type=str, default="",
choices=["", "true", "false"], help="experiment use aug")
parser.add_argument("--config", type=str, default=None,
help="experiment config file")
parser.add_argument("--debug", type=str, default="",
choices=["", "true", "false"], help="debug")
parser.add_argument("--resume", type=str, default="true",
choices=["", "true", "false"], help="resume")
args = parser.parse_args()
debug = None
if args.debug != "":
debug = (args.debug == "true")
if args.dataset == "7scenes_loc":
cfg = SevenScenesLocConfig(args.config, debug)
elif args.dataset == "cambridge_loc":
cfg = CambridgeLocConfig(args.config, debug)
if args.scene != "":
cfg.opt["scene"] = args.scene
if args.gpu_id != "":
cfg.opt["devices"] = args.gpu_id
if args.use_aug == "true":
cfg.opt["use_aug"] = True
if args.resume == "true":
cfg.opt["resume"] = True
cfg.opt["resume_checkpoint"] = cfg["export_dir"] + \
'/ckpts/checkpoint-backup.pth.tar'
cfg.print_opt()
cfg.set_environmental_variables()
torch.manual_seed(cfg["seed"])
torch.cuda.manual_seed(cfg["seed"])
np.random.seed(cfg["seed"])
random.seed(cfg["seed"])
trainer = Trainer(cfg)
print("Starting Epoch:", trainer.cfg["start_epoch"])
print("Total Epoches:", trainer.cfg["epochs"])
trainer.validation(trainer.cfg["start_epoch"])
trainer.summary.close()
if __name__ == "__main__":
main()
|
[
"dataloaders.make_data_loader",
"numpy.array",
"numpy.right_shift",
"numpy.mean",
"argparse.ArgumentParser",
"utils.utils.pnp",
"numpy.random.seed",
"torch.autograd.Variable",
"utils.metrics.Evaluator",
"utils.summaries.TensorboardSummary",
"utils.utils.evaluate_vertex_v2",
"os.path.isfile",
"models.sync_batchnorm.replicate.patch_replication_callback",
"warnings.filterwarnings",
"numpy.median",
"utils.utils.reproject_error",
"utils.saver.Saver",
"tqdm.tqdm",
"os.path.join",
"random.seed",
"numpy.bitwise_and",
"numpy.zeros",
"utils.utils.cm_degree_metric"
] |
[((585, 618), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (608, 618), False, 'import warnings\n'), ((13676, 13753), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Landmark Segmentation Training"""'}), "(description='PyTorch Landmark Segmentation Training')\n", (13699, 13753), False, 'import argparse\n'), ((15442, 15469), 'numpy.random.seed', 'np.random.seed', (["cfg['seed']"], {}), "(cfg['seed'])\n", (15456, 15469), True, 'import numpy as np\n'), ((15474, 15498), 'random.seed', 'random.seed', (["cfg['seed']"], {}), "(cfg['seed'])\n", (15485, 15498), False, 'import random\n'), ((740, 750), 'utils.saver.Saver', 'Saver', (['cfg'], {}), '(cfg)\n', (745, 750), False, 'from utils.saver import Saver\n'), ((811, 853), 'utils.summaries.TensorboardSummary', 'TensorboardSummary', (["self.cfg['log_tb_dir']"], {}), "(self.cfg['log_tb_dir'])\n", (829, 853), False, 'from utils.summaries import TensorboardSummary\n'), ((1063, 1094), 'dataloaders.make_data_loader', 'make_data_loader', (['cfg'], {}), '(cfg, **kwargs)\n', (1079, 1094), False, 'from dataloaders import make_data_loader\n'), ((1733, 1779), 'numpy.zeros', 'np.zeros', (['(unique_label.shape[0], 3)', 'np.uint8'], {}), '((unique_label.shape[0], 3), np.uint8)\n', (1741, 1779), True, 'import numpy as np\n'), ((1806, 1839), 'numpy.bitwise_and', 'np.bitwise_and', (['unique_label', '(255)'], {}), '(unique_label, 255)\n', (1820, 1839), True, 'import numpy as np\n'), ((2026, 2045), 'numpy.array', 'np.array', (['color_map'], {}), '(color_map)\n', (2034, 2045), True, 'import numpy as np\n'), ((2074, 2120), 'torch.autograd.Variable', 'Variable', (['self.coding_book'], {'requires_grad': '(True)'}), '(self.coding_book, requires_grad=True)\n', (2082, 2120), False, 'from torch.autograd import Variable\n'), ((3587, 3646), 'utils.metrics.Evaluator', 'Evaluator', (['self.coding_book.shape[0]', "cfg['vertex_channel']"], {}), "(self.coding_book.shape[0], cfg['vertex_channel'])\n", (3596, 3646), False, 'from utils.metrics import Evaluator\n'), ((4047, 4085), 'models.sync_batchnorm.replicate.patch_replication_callback', 'patch_replication_callback', (['self.model'], {}), '(self.model)\n', (4073, 4085), False, 'from models.sync_batchnorm.replicate import patch_replication_callback\n'), ((5586, 5618), 'tqdm.tqdm', 'tqdm', (['self.val_loader'], {'desc': "'\\r'"}), "(self.val_loader, desc='\\r')\n", (5590, 5618), False, 'from tqdm import tqdm\n'), ((1882, 1913), 'numpy.right_shift', 'np.right_shift', (['unique_label', '(4)'], {}), '(unique_label, 4)\n', (1896, 1913), True, 'import numpy as np\n'), ((1962, 1993), 'numpy.right_shift', 'np.right_shift', (['unique_label', '(8)'], {}), '(unique_label, 8)\n', (1976, 1993), True, 'import numpy as np\n'), ((12190, 12208), 'numpy.mean', 'np.mean', (['ten_count'], {}), '(ten_count)\n', (12197, 12208), True, 'import numpy as np\n'), ((12302, 12321), 'numpy.mean', 'np.mean', (['five_count'], {}), '(five_count)\n', (12309, 12321), True, 'import numpy as np\n'), ((12415, 12435), 'numpy.mean', 'np.mean', (['three_count'], {}), '(three_count)\n', (12422, 12435), True, 'import numpy as np\n'), ((12497, 12515), 'numpy.mean', 'np.mean', (['one_count'], {}), '(one_count)\n', (12504, 12515), True, 'import numpy as np\n'), ((12605, 12632), 'numpy.median', 'np.median', (['translation_list'], {}), '(translation_list)\n', (12614, 12632), True, 'import numpy as np\n'), ((12718, 12741), 'numpy.median', 'np.median', (['angular_list'], {}), '(angular_list)\n', 
(12727, 12741), True, 'import numpy as np\n'), ((12893, 12911), 'numpy.mean', 'np.mean', (['ten_count'], {}), '(ten_count)\n', (12900, 12911), True, 'import numpy as np\n'), ((12940, 12959), 'numpy.mean', 'np.mean', (['five_count'], {}), '(five_count)\n', (12947, 12959), True, 'import numpy as np\n'), ((12968, 12988), 'numpy.mean', 'np.mean', (['three_count'], {}), '(three_count)\n', (12975, 12988), True, 'import numpy as np\n'), ((12997, 13015), 'numpy.mean', 'np.mean', (['one_count'], {}), '(one_count)\n', (13004, 13015), True, 'import numpy as np\n'), ((13059, 13086), 'numpy.median', 'np.median', (['translation_list'], {}), '(translation_list)\n', (13068, 13086), True, 'import numpy as np\n'), ((13104, 13127), 'numpy.median', 'np.median', (['angular_list'], {}), '(angular_list)\n', (13113, 13127), True, 'import numpy as np\n'), ((4369, 4409), 'os.path.isfile', 'os.path.isfile', (["cfg['resume_checkpoint']"], {}), "(cfg['resume_checkpoint'])\n", (4383, 4409), False, 'import os\n'), ((4430, 4470), 'os.path.isfile', 'os.path.isfile', (["cfg['resume_checkpoint']"], {}), "(cfg['resume_checkpoint'])\n", (4444, 4470), False, 'import os\n'), ((8469, 8611), 'utils.utils.evaluate_vertex_v2', 'utils.evaluate_vertex_v2', (['vertex_pred', 'seg_pred', 'self.id2center'], {'inlier_thresh': '(0.999)', 'min_mask_num': "self.cfg['val_label_filter_threshsold']"}), "(vertex_pred, seg_pred, self.id2center,\n inlier_thresh=0.999, min_mask_num=self.cfg['val_label_filter_threshsold'])\n", (8493, 8611), False, 'from utils import utils\n'), ((9113, 9165), 'utils.utils.pnp', 'utils.pnp', (['pt3d_filter', 'pt2d_filter', 'camera_k_matrix'], {}), '(pt3d_filter, pt2d_filter, camera_k_matrix)\n', (9122, 9165), False, 'from utils import utils\n'), ((9219, 9294), 'utils.utils.reproject_error', 'utils.reproject_error', (['pt3d_filter', 'pt2d_filter', 'pose_pred', 'camera_k_matrix'], {}), '(pt3d_filter, pt2d_filter, pose_pred, camera_k_matrix)\n', (9240, 9294), False, 'from utils import utils\n'), ((9381, 9427), 'utils.utils.cm_degree_metric', 'utils.cm_degree_metric', (['pose_pred', 'pose_target'], {}), '(pose_pred, pose_target)\n', (9403, 9427), False, 'from utils import utils\n'), ((1203, 1247), 'os.path.join', 'osp.join', (["cfg['data_dir']", '"""id2centers.json"""'], {}), "(cfg['data_dir'], 'id2centers.json')\n", (1211, 1247), True, 'import os.path as osp\n')]
|
import numpy as np
import gzip
import pickle
import os
import urllib.request
class MNIST:
host = 'http://yann.lecun.com/exdb/mnist/'
filenames = {
'train': ('train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz'),
'test': ('t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'),
}
dataset_filename = 'mnist.pkl.gz'
train_samples = 50000
validation_samples = 10000
test_samples = 10000
def __init__(self):
self.current_dir = os.path.dirname(__file__)
if not self.is_dataset_available():
            print('Dataset not available! It will be downloaded and decoded; this can take a while, please wait!')
datasets = self.get_base_datasets_filenames()
for dataset in datasets:
if not self.is_base_dataset_downloaded(dataset):
print(f'Downloading {dataset}...')
self.download_dataset(dataset)
print('Decoding files and saving it...')
self.decode_and_save()
print('Deleting base files (downloaded)...')
for dataset in datasets:
self.delete_dataset(dataset)
print('Done.')
def is_dataset_available(self):
return os.path.exists(os.path.join(self.current_dir, self.dataset_filename))
def get_base_datasets_filenames(self):
return self.filenames['train'] + self.filenames['test']
def is_base_dataset_downloaded(self, filename):
return os.path.exists(os.path.join(self.current_dir, filename))
def download_dataset(self, filename):
url = self.host + filename
dest = os.path.join(self.current_dir, filename)
urllib.request.urlretrieve(url, dest)
def delete_dataset(self, filename):
os.remove(os.path.join(self.current_dir, filename))
def decode_and_save(self):
data = {}
for key, (images_filename, labels_filename) in self.filenames.items():
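            # IDX format: image files carry a 16-byte header, label files an 8-byte header (hence the offsets below)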
with gzip.open(os.path.join(self.current_dir, images_filename), 'rb') as file:
images = np.frombuffer(file.read(), np.uint8, offset=16).reshape(-1, 28*28)
with gzip.open(os.path.join(self.current_dir, labels_filename), 'rb') as file:
labels = np.frombuffer(file.read(), np.uint8, offset=8)
data[key] = (images, labels)
training = tuple(x[:self.train_samples] for x in data['train'])
validation = tuple(x[self.train_samples:] for x in data['train'])
test = tuple(data['test'])
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'wb') as file:
pickle.dump((training, validation, test), file)
def load(self):
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'rb') as file:
training, validation, test = pickle.load(file)
return training, validation, test
|
[
"pickle.dump",
"os.path.dirname",
"pickle.load",
"os.path.join"
] |
[((464, 489), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (479, 489), False, 'import os\n'), ((1514, 1554), 'os.path.join', 'os.path.join', (['self.current_dir', 'filename'], {}), '(self.current_dir, filename)\n', (1526, 1554), False, 'import os\n'), ((1153, 1206), 'os.path.join', 'os.path.join', (['self.current_dir', 'self.dataset_filename'], {}), '(self.current_dir, self.dataset_filename)\n', (1165, 1206), False, 'import os\n'), ((1389, 1429), 'os.path.join', 'os.path.join', (['self.current_dir', 'filename'], {}), '(self.current_dir, filename)\n', (1401, 1429), False, 'import os\n'), ((1652, 1692), 'os.path.join', 'os.path.join', (['self.current_dir', 'filename'], {}), '(self.current_dir, filename)\n', (1664, 1692), False, 'import os\n'), ((2445, 2492), 'pickle.dump', 'pickle.dump', (['(training, validation, test)', 'file'], {}), '((training, validation, test), file)\n', (2456, 2492), False, 'import pickle\n'), ((2636, 2653), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2647, 2653), False, 'import pickle\n'), ((2369, 2422), 'os.path.join', 'os.path.join', (['self.current_dir', 'self.dataset_filename'], {}), '(self.current_dir, self.dataset_filename)\n', (2381, 2422), False, 'import os\n'), ((2531, 2584), 'os.path.join', 'os.path.join', (['self.current_dir', 'self.dataset_filename'], {}), '(self.current_dir, self.dataset_filename)\n', (2543, 2584), False, 'import os\n'), ((1839, 1886), 'os.path.join', 'os.path.join', (['self.current_dir', 'images_filename'], {}), '(self.current_dir, images_filename)\n', (1851, 1886), False, 'import os\n'), ((2015, 2062), 'os.path.join', 'os.path.join', (['self.current_dir', 'labels_filename'], {}), '(self.current_dir, labels_filename)\n', (2027, 2062), False, 'import os\n')]
|
import os
import numpy as np
import time
import multiprocessing as mp
import csv
import socket
import datetime
import math
import glob
from pypushexp import PushSim
# # input - [recorded item]
# [weight] : 48
# [height] : 160
# [crouch_angle] (deg)
# [step_length_ratio]
# [halfcycle_duration_ratio]
# [push_step] : 8
# [push_duration] (sec) : .2
# [push_force] (N)
# [push_start_timing] (half gait cycle percent)
#
# # output
# [pushed_length] (m) : sim.out_pushed_length
# [pushed_steps] : sim.out_pushed_steps
# [push_strength] : abs(push_force * push_duration / weight)
# [step_length] (m) : sim.getPushedLength()
# [walking_speed] (m/s) : sim.getWalkingSpeed()
# [halfcycle_duration] (s) : sim.getStepLength() /sim.getWalkingSpeed()
#
# # output for hospital
# [distance] : pushed_length * 1000.
# [speed] : walking_speed * 1000.
# [force] : push_strength * 1000.
# [stride] : step_length * 1000.
# [start_timing_time_ic] = sim.start_timing_time_ic
# [mid_timing_time_ic] = sim.mid_timing_time_ic
# [start_timing_foot_ic] = sim.getStartTimingFootIC()
# [mid_timing_foot_ic] = sim.getMidTimingFootIC()
# [start_timing_time_fl] = sim.getStartTimingTimeFL()
# [mid_timing_time_fl] = sim.getMidTimingTimeFL()
# [start_timing_foot_fl] = sim.getStartTimingFootFL()
# [mid_timing_foot_fl] = sim.getMidTimingFootFL()
# # not used
# subject no
# sex
# left leg length
# right leg length
# stride
# speed
# experiment
# file name
# trial no
# push timing : 'left stance'
# push direction : 'from left'
# normalized push length
# push length until first step
# push end timing (time)
# push end timing (foot pos)
# return during first step
# push duration
# push start time
def gettimestringisoformat():
return datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
def worker_simulation(sim, param):
try:
push_step, push_duration,\
crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label,\
weight, height, ith, q = param
# print(int(crouch_angle), step_length_ratio, walk_speed_ratio, push_force, push_start_timing)
sim.setParamedStepParams(int(crouch_angle), step_length_ratio, walk_speed_ratio)
sim.setPushParams(8, 0.2, 0., 0.)
print(step_length_ratio, walk_speed_ratio)
stopcode = sim.simulate()
# stopcode = 0
if stopcode in [0, 3, 4]:
cot = sim.getCostOfTransport()
walking_speed = sim.getWalkingSpeed()
q.put((ith, crouch_angle, walking_speed, cot))
except IndexError:
pass
def write_start(csvfilepath):
csvfile = open(csvfilepath, 'w')
csvfile.write('type,ith,crouch_angle,speed,cot\n')
return csvfile
def write_body(q, csvfile):
while True:
try:
ith, crouch_angle, walking_speed, cot = q.get(False)
csvfile.write('torque,%d,%s,%s,%s\n' % (ith, crouch_angle, walking_speed, cot))
csvfile.flush()
except:
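            # q.get(False) raises queue.Empty once the queue is drained; stop writing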
break
def write_end(csvfile):
csvfile.close()
def simulate(sim, launch_order, num, option_str=''):
#=======================================================================
# settings
#=======================================================================
TEST = True if launch_order is None else False
# TEST = True
# TEST = False
weight = 72
height = 170
push_step = 8
push_duration = .2
test_params = [] # element: (crouch_angle, step_length_ratio, halfcycle_duration_ratio, push_force, push_start_timing)
# ===========================================================================
#
# ===========================================================================
if TEST:
# test
additional_str = ''
num = 2
# num = 5000
mean_crouch = [0, 20, 30, 60]
else:
# real
all_mean_crouch = [0, 20, 30, 60]
mean_crouch = [all_mean_crouch[launch_order % len(all_mean_crouch)]]
additional_str = '_%ddeg__push' % mean_crouch[0]
# if launch_order==0:
# param_opt_result = '130810_113234_0_60_push'
# additional_str = '_0_60_push'
# elif launch_order==2:
# param_opt_result = '130810_161152_0_30_60_push'
# additional_str = '_0_30_60_push'
# =======================================================================
# set logger
# =======================================================================
outDir = os.path.dirname(os.path.abspath(__file__)) + '/results/'
if not os.path.exists(outDir):
os.makedirs(outDir)
csvfilepath = outDir + 'COT_' +option_str + '_' + gettimestringisoformat() + '_' + str(num) + 'trials_' + socket.gethostname() + '.csv'
print('start logging at', gettimestringisoformat())
print()
print('<simulation setting>')
# =======================================================================
# test2 : multivariate normal distribution
# =======================================================================
stride_means = [1.1262070300, 0.9529737358, 0.9158506655, 0.8755451448]
speed_means = [0.9943359644, 0.8080297151, 0.7880050552, 0.7435198328]
stride_vars = [0.03234099289, 0.02508595114, 0.02772452640, 0.02817863267]
stride_speed_covars = [0.03779884365, 0.02225320798, 0.02906793442, 0.03000639027]
speed_vars = [0.06929309644, 0.04421889347, 0.04899931048, 0.05194827755]
# crouch angle
# mean_crouch = [0,20,30,60]
std_crouch = 1
# step length
motion_stride_bvh_after_default_param = 1.1886
experi_stride_mean = stride_means[launch_order]
experi_stride_std = math.sqrt(stride_vars[launch_order])
mean_length_ratio = experi_stride_mean / motion_stride_bvh_after_default_param
std_length_ratio = experi_stride_std / motion_stride_bvh_after_default_param
# walk speed
speed_bvh_after_default_param = 0.9134
experi_speed_mean = speed_means[launch_order]
experi_speed_std = math.sqrt(speed_vars[launch_order])
mean_speed_ratio = experi_speed_mean / speed_bvh_after_default_param
std_speed_ratio = experi_speed_std / speed_bvh_after_default_param
# push strength
mean_strength = .535
std_strength = .096
mean_force = -(mean_strength*weight/push_duration)
std_force = (std_strength*weight/push_duration)
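    # recover push force from the weight-normalized strength (strength = |force| * duration / weight, see notes above)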
# push timing
mean_timing = 34
std_timing = 21
if TEST:
np.set_printoptions(precision=4, linewidth=200)
# for i in range(len(mean_crouch)):
# mean = [mean_crouch[i], mean_length_ratio, mean_duration_ratio, mean_force, mean_timing, mean_crouch[i]]
# cov = np.diag( [std_crouch**2, std_length_ratio**2, std_duration_ratio**2, std_force**2, std_timing**2, 0])
for i in range(len(mean_crouch)):
mean = [mean_crouch[i], mean_length_ratio, mean_speed_ratio, mean_force, mean_timing, mean_crouch[i]]
cov = np.diag([0 , std_length_ratio**2, std_speed_ratio**2, std_force**2, std_timing**2, 0])
cov[1, 2] = stride_speed_covars[i] / speed_bvh_after_default_param / motion_stride_bvh_after_default_param
cov[2, 1] = stride_speed_covars[i] / speed_bvh_after_default_param / motion_stride_bvh_after_default_param
if len(test_params) == 0:
test_params = np.random.multivariate_normal(mean, cov, num)
else:
test_params = np.vstack((test_params, np.random.multivariate_normal(mean, cov, num)))
# no negative crouch angle
for i in range(len(test_params)):
test_params[i][0] = abs(test_params[i][0])
test_params[i][2] = abs(test_params[i][2])
test_params[i][3] = -abs(test_params[i][3])
# print(test_params)
print()
print('multivariate normal distribution')
print()
print('mean_crouch', mean_crouch)
print('std_crouch', std_crouch)
print()
print('motion_step_stride', motion_stride_bvh_after_default_param)
print('experi_step_length_mean', experi_stride_mean)
print('experi_step_length_std', experi_stride_std)
print('mean_length_ratio', mean_length_ratio)
print('std_length_ratio', std_length_ratio)
print()
print('motion_speed', speed_bvh_after_default_param)
print('experi_speed_mean', experi_speed_mean)
print('experi_speed_std', experi_speed_std)
print('mean_speed_ratio', mean_speed_ratio)
print('std_speed_ratio', std_speed_ratio)
print()
print('num', num)
print()
print('total # of simulations', len(test_params))
print()
# =======================================================================
# simulation
# =======================================================================
pt = time.time()
print('<start simulation>')
print('hostname %s ' % socket.gethostname())
print()
q = mp.Manager().Queue()
groupsize = 100
paramgroups = [[] for i in range( len(test_params)//groupsize + 1 )]
ith = 1
for i in range(len(test_params)):
crouch_angle = test_params[i][0]
step_length_ratio = test_params[i][1]
walk_speed_ratio = test_params[i][2]
push_force = test_params[i][3]
push_start_timing = test_params[i][4]
crouch_label = test_params[i][5]
paramgroups[i//groupsize].append((push_step, push_duration,
crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label,
weight, height, ith, q))
ith += 1
csvfile = write_start(csvfilepath)
for i in range(len(paramgroups)):
for j in range(len(paramgroups[i])):
print(j)
worker_simulation(sim, paramgroups[i][j])
write_body(q, csvfile)
write_end(csvfile)
print()
_s = time.time() - pt
_h = _s // 3600
_m = _s // 60
_s -= 60 * _m
_m -= 60 * _h
print('elapsed time = %d h:%d m:%d s' % (int(_h), int(_m), int(_s)))
print()
print('end logging at', gettimestringisoformat())
if __name__ == '__main__':
import sys
import re
option = sys.argv[1]
trial_num = int(sys.argv[2])
_metadata_dir = os.path.dirname(os.path.abspath(__file__)) + '/../data/metadata/'
_nn_finding_dir = os.path.dirname(os.path.abspath(__file__)) + '/../nn/*/'
nn_dir = None
if _nn_finding_dir is not None:
nn_dir = glob.glob(_nn_finding_dir + option)[0]
meta_file = _metadata_dir + option + '.txt'
sim = None
if 'muscle' in option:
sim = PushSim(meta_file, nn_dir+'/max.pt', nn_dir+'/max_muscle.pt')
else:
sim = PushSim(meta_file, nn_dir+'/max.pt')
if "all" in option:
simulate(sim, 0, trial_num, option)
simulate(sim, 1, trial_num, option)
simulate(sim, 2, trial_num, option)
simulate(sim, 3, trial_num, option)
else:
crouch = re.findall(r'crouch\d+', option)[0][6:]
simulate(sim, ['0', '20', '30', '60'].index(crouch), trial_num, option)
|
[
"os.path.exists",
"os.makedirs",
"numpy.random.multivariate_normal",
"math.sqrt",
"multiprocessing.Manager",
"numpy.diag",
"pypushexp.PushSim",
"datetime.datetime.now",
"re.findall",
"os.path.abspath",
"socket.gethostname",
"time.time",
"glob.glob",
"numpy.set_printoptions"
] |
[((5920, 5956), 'math.sqrt', 'math.sqrt', (['stride_vars[launch_order]'], {}), '(stride_vars[launch_order])\n', (5929, 5956), False, 'import math\n'), ((6255, 6290), 'math.sqrt', 'math.sqrt', (['speed_vars[launch_order]'], {}), '(speed_vars[launch_order])\n', (6264, 6290), False, 'import math\n'), ((9026, 9037), 'time.time', 'time.time', ([], {}), '()\n', (9035, 9037), False, 'import time\n'), ((4806, 4828), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (4820, 4828), False, 'import os\n'), ((4838, 4857), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (4849, 4857), False, 'import os\n'), ((6702, 6749), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'linewidth': '(200)'}), '(precision=4, linewidth=200)\n', (6721, 6749), True, 'import numpy as np\n'), ((7222, 7320), 'numpy.diag', 'np.diag', (['[0, std_length_ratio ** 2, std_speed_ratio ** 2, std_force ** 2, std_timing **\n 2, 0]'], {}), '([0, std_length_ratio ** 2, std_speed_ratio ** 2, std_force ** 2, \n std_timing ** 2, 0])\n', (7229, 7320), True, 'import numpy as np\n'), ((10168, 10179), 'time.time', 'time.time', ([], {}), '()\n', (10177, 10179), False, 'import time\n'), ((10897, 10962), 'pypushexp.PushSim', 'PushSim', (['meta_file', "(nn_dir + '/max.pt')", "(nn_dir + '/max_muscle.pt')"], {}), "(meta_file, nn_dir + '/max.pt', nn_dir + '/max_muscle.pt')\n", (10904, 10962), False, 'from pypushexp import PushSim\n'), ((10983, 11021), 'pypushexp.PushSim', 'PushSim', (['meta_file', "(nn_dir + '/max.pt')"], {}), "(meta_file, nn_dir + '/max.pt')\n", (10990, 11021), False, 'from pypushexp import PushSim\n'), ((1873, 1896), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1894, 1896), False, 'import datetime\n'), ((4753, 4778), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4768, 4778), False, 'import os\n'), ((4969, 4989), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4987, 4989), False, 'import socket\n'), ((7612, 7657), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', 'num'], {}), '(mean, cov, num)\n', (7641, 7657), True, 'import numpy as np\n'), ((9098, 9118), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (9116, 9118), False, 'import socket\n'), ((9141, 9153), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (9151, 9153), True, 'import multiprocessing as mp\n'), ((10552, 10577), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (10567, 10577), False, 'import os\n'), ((10640, 10665), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (10655, 10665), False, 'import os\n'), ((10753, 10788), 'glob.glob', 'glob.glob', (['(_nn_finding_dir + option)'], {}), '(_nn_finding_dir + option)\n', (10762, 10788), False, 'import glob\n'), ((11248, 11280), 're.findall', 're.findall', (['"""crouch\\\\d+"""', 'option'], {}), "('crouch\\\\d+', option)\n", (11258, 11280), False, 'import re\n'), ((7722, 7767), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', 'num'], {}), '(mean, cov, num)\n', (7751, 7767), True, 'import numpy as np\n')]
|
from django.contrib import admin
from .models import Room, Topic, Message, User
admin.site.register(Room)
admin.site.register(Topic)
admin.site.register(Message)
admin.site.register(User)
|
[
"django.contrib.admin.site.register"
] |
[((81, 106), 'django.contrib.admin.site.register', 'admin.site.register', (['Room'], {}), '(Room)\n', (100, 106), False, 'from django.contrib import admin\n'), ((107, 133), 'django.contrib.admin.site.register', 'admin.site.register', (['Topic'], {}), '(Topic)\n', (126, 133), False, 'from django.contrib import admin\n'), ((134, 162), 'django.contrib.admin.site.register', 'admin.site.register', (['Message'], {}), '(Message)\n', (153, 162), False, 'from django.contrib import admin\n'), ((163, 188), 'django.contrib.admin.site.register', 'admin.site.register', (['User'], {}), '(User)\n', (182, 188), False, 'from django.contrib import admin\n')]
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hparam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.platform import test
class HParamsTest(test.TestCase):
def _assertDictEquals(self, d1, d2):
self.assertEqual(len(d1), len(d2))
for k, v in six.iteritems(d1):
self.assertTrue(k in d2, k)
self.assertEquals(v, d2[k], d2[k])
def testEmpty(self):
hparams = hparam.HParams()
self._assertDictEquals({}, hparams.values())
hparams.parse('')
self._assertDictEquals({}, hparams.values())
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('xyz=123')
def testSomeValues(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6')
self._assertDictEquals(
{'aaa': 1, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
expected_str = '[(\'aaa\', 1), (\'b\', 2.0), (\'c_c\', \'relu6\')]'
self.assertEquals(expected_str, str(hparams.__str__()))
self.assertEquals(expected_str, str(hparams))
self.assertEquals(1, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse('aaa=12')
self._assertDictEquals(
{'aaa': 12, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse('c_c=relu4,b=-2.0e10')
self._assertDictEquals({'aaa': 12, 'b': -2.0e10, 'c_c': 'relu4'},
hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(-2.0e10, hparams.b)
self.assertEquals('relu4', hparams.c_c)
hparams.parse('c_c=,b=0,')
self._assertDictEquals({'aaa': 12, 'b': 0, 'c_c': ''}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(0.0, hparams.b)
self.assertEquals('', hparams.c_c)
hparams.parse('c_c=2.3",b=+2,')
self.assertEquals(2.0, hparams.b)
self.assertEquals('2.3"', hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=123')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=poipoi')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=1.0')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=12x')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=relu')
with self.assertRaisesRegexp(ValueError, 'Must not pass a list'):
hparams.parse('aaa=[123]')
self.assertEquals(12, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('2.3"', hparams.c_c)
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEquals(12, hparams2.aaa)
self.assertEquals(2.0, hparams2.b)
self.assertEquals('2.3"', hparams2.c_c)
def testBoolParsing(self):
for value in 'true', 'false', 'True', 'False', '1', '0':
for initial in False, True:
hparams = hparam.HParams(use_gpu=initial)
hparams.parse('use_gpu=' + value)
self.assertEqual(hparams.use_gpu, value in ['True', 'true', '1'])
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
self.assertEquals(hparams.use_gpu, hparams2.use_gpu)
# Check that hparams2.use_gpu is a bool rather than an int.
# The assertEquals() call above won't catch this, since
# (0 == False) and (1 == True) in Python.
self.assertEquals(bool, type(hparams2.use_gpu))
def testBoolParsingFail(self):
hparams = hparam.HParams(use_gpu=True)
with self.assertRaisesRegexp(ValueError, r'Could not parse.*use_gpu'):
hparams.parse('use_gpu=yep')
def testLists(self):
hparams = hparam.HParams(aaa=[1], b=[2.0, 3.0], c_c=['relu6'])
self._assertDictEquals({'aaa': [1], 'b': [2.0, 3.0], 'c_c': ['relu6']},
hparams.values())
self.assertEquals([1], hparams.aaa)
self.assertEquals([2.0, 3.0], hparams.b)
self.assertEquals(['relu6'], hparams.c_c)
hparams.parse('aaa=[12]')
self.assertEquals([12], hparams.aaa)
hparams.parse('aaa=[12,34,56]')
self.assertEquals([12, 34, 56], hparams.aaa)
hparams.parse('c_c=[relu4,relu12],b=[1.0]')
self.assertEquals(['relu4', 'relu12'], hparams.c_c)
self.assertEquals([1.0], hparams.b)
hparams.parse('c_c=[],aaa=[-34]')
self.assertEquals([-34], hparams.aaa)
self.assertEquals([], hparams.c_c)
hparams.parse('c_c=[_12,3\'4"],aaa=[+3]')
self.assertEquals([3], hparams.aaa)
self.assertEquals(['_12', '3\'4"'], hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=[123]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[poipoi]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[1.0]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[12x]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[relu]')
with self.assertRaisesRegexp(ValueError, 'Must pass a list'):
hparams.parse('aaa=123')
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEquals([3], hparams2.aaa)
self.assertEquals([1.0], hparams2.b)
self.assertEquals(['_12', '3\'4"'], hparams2.c_c)
def testJson(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
self._assertDictEquals(
{'aaa': 1, 'b': 2.0, 'c_c': 'relu6', 'd': True}, hparams.values())
self.assertEquals(1, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse_json('{"aaa": 12, "b": 3.0, "c_c": "relu4", "d": false}')
self._assertDictEquals(
{'aaa': 12, 'b': 3.0, 'c_c': 'relu4', 'd': False}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(3.0, hparams.b)
self.assertEquals('relu4', hparams.c_c)
json_str = hparams.to_json()
hparams2 = hparam.HParams(aaa=10, b=20.0, c_c='hello', d=False)
hparams2.parse_json(json_str)
self.assertEquals(12, hparams2.aaa)
self.assertEquals(3.0, hparams2.b)
self.assertEquals('relu4', hparams2.c_c)
self.assertEquals(False, hparams2.d)
def testNonProtoFails(self):
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1.0)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def='hello')
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=[1, 2, 3])
if __name__ == '__main__':
test.main()
|
[
"tensorflow.contrib.training.python.training.hparam.HParams",
"six.iteritems",
"tensorflow.python.platform.test.main"
] |
[((7901, 7912), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (7910, 7912), False, 'from tensorflow.python.platform import test\n'), ((1075, 1092), 'six.iteritems', 'six.iteritems', (['d1'], {}), '(d1)\n', (1088, 1092), False, 'import six\n'), ((1207, 1223), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {}), '()\n', (1221, 1223), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((1490, 1531), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'aaa': '(1)', 'b': '(2.0)', 'c_c': '"""relu6"""'}), "(aaa=1, b=2.0, c_c='relu6')\n", (1504, 1531), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((3599, 3636), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'hparam_def': 'hparam_def'}), '(hparam_def=hparam_def)\n', (3613, 3636), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((4600, 4628), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'use_gpu': '(True)'}), '(use_gpu=True)\n', (4614, 4628), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((4777, 4829), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'aaa': '[1]', 'b': '[2.0, 3.0]', 'c_c': "['relu6']"}), "(aaa=[1], b=[2.0, 3.0], c_c=['relu6'])\n", (4791, 4829), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((6333, 6370), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'hparam_def': 'hparam_def'}), '(hparam_def=hparam_def)\n', (6347, 6370), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((6590, 6639), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'aaa': '(1)', 'b': '(2.0)', 'c_c': '"""relu6"""', 'd': '(True)'}), "(aaa=1, b=2.0, c_c='relu6', d=True)\n", (6604, 6639), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((7214, 7266), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'aaa': '(10)', 'b': '(20.0)', 'c_c': '"""hello"""', 'd': '(False)'}), "(aaa=10, b=20.0, c_c='hello', d=False)\n", (7228, 7266), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((7558, 7586), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'hparam_def': '(1)'}), '(hparam_def=1)\n', (7572, 7586), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((7647, 7677), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'hparam_def': '(1.0)'}), '(hparam_def=1.0)\n', (7661, 7677), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((7738, 7772), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'hparam_def': '"""hello"""'}), "(hparam_def='hello')\n", (7752, 7772), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((7833, 7869), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'hparam_def': '[1, 2, 3]'}), '(hparam_def=[1, 2, 3])\n', (7847, 7869), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((3949, 3980), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'use_gpu': 'initial'}), '(use_gpu=initial)\n', (3963, 3980), False, 'from 
tensorflow.contrib.training.python.training import hparam\n'), ((4215, 4252), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {'hparam_def': 'hparam_def'}), '(hparam_def=hparam_def)\n', (4229, 4252), False, 'from tensorflow.contrib.training.python.training import hparam\n')]
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Utilities to create/drop views.
Based on a recipe published in:
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/Views
"""
from sqlalchemy.sql import table
from sqlalchemy.ext import compiler
from sqlalchemy.schema import DDLElement
__docformat__ = 'reStructuredText en'
__all__ = ['CreateView',
'DropView',
'view_factory',
]
class CreateView(DDLElement):
def __init__(self, name, selectable): # pylint: disable=W0231
self.name = name
self.selectable = selectable
class DropView(DDLElement):
def __init__(self, name): # pylint: disable=W0231
self.name = name
@compiler.compiles(CreateView, 'postgresql')
def create_view_compile_postgresql(element, compiler, **kw): # pylint: disable=W0621,W0613
selection = compiler.sql_compiler.process(element.selectable)
stmt = "CREATE OR REPLACE VIEW %s AS %s" % (element.name, selection)
# FIXME: we should not combine the statement and params here.
# it is a SQLAlchemy bug... report it.
params = {}
for k, v in element.selectable.compile().params.iteritems():
params[k] = ("'%s'" % v) if isinstance(v, basestring) else v
return stmt % params
@compiler.compiles(CreateView, 'sqlite')
def create_view_compile_sqlite(element, compiler, **kw): # pylint: disable=W0621,W0613
# FIXME: duplicate code
# FIXME: it seems that there is a bug in SQLAlchemy and creating views
# this way emits an exception
selection = compiler.sql_compiler.process(element.selectable)
stmt = "CREATE VIEW %s AS %s" % (element.name, selection)
# FIXME: we should not combine the statement and params here.
# it is a SQLAlchemy bug... report it.
params = {}
for k, v in element.selectable.compile().params.iteritems():
params[k] = ("'%s'" % v) if isinstance(v, basestring) else v
return stmt % params
@compiler.compiles(DropView)
def drop_view_compile(element, compiler, **kw): # pylint: disable=W0621,W0613
return "DROP VIEW %s" % (element.name)
def view_factory(name, metadata, selectable):
if not hasattr(metadata, 'views'):
metadata.views = {}
metadata.views[name] = table(name)
for c in selectable.c:
c._make_proxy(metadata.views[name]) # pylint: disable=W0212
CreateView(name, selectable).execute_at('after-create', metadata)
DropView(name).execute_at('before-drop', metadata)
return metadata.views[name]
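# Illustrative usage sketch (not part of the original module; table and column names are
# hypothetical). With the legacy SQLAlchemy API this recipe targets, the view is registered
# against the metadata and the CREATE VIEW / DROP VIEW DDL is emitted by
# metadata.create_all() / metadata.drop_all():
#
#   from sqlalchemy import MetaData, Table, Column, Integer, String, select
#   metadata = MetaData()
#   user = Table('user', metadata,
#                Column('id', Integer, primary_key=True),
#                Column('name', String))
#   user_names = view_factory('user_names_view', metadata,
#                             select([user.c.id, user.c.name]))
#   # user_names can now be referenced like a table in further selectables.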
|
[
"sqlalchemy.sql.table",
"sqlalchemy.ext.compiler.sql_compiler.process",
"sqlalchemy.ext.compiler.compiles"
] |
[((797, 840), 'sqlalchemy.ext.compiler.compiles', 'compiler.compiles', (['CreateView', '"""postgresql"""'], {}), "(CreateView, 'postgresql')\n", (814, 840), False, 'from sqlalchemy.ext import compiler\n'), ((1365, 1404), 'sqlalchemy.ext.compiler.compiles', 'compiler.compiles', (['CreateView', '"""sqlite"""'], {}), "(CreateView, 'sqlite')\n", (1382, 1404), False, 'from sqlalchemy.ext import compiler\n'), ((2058, 2085), 'sqlalchemy.ext.compiler.compiles', 'compiler.compiles', (['DropView'], {}), '(DropView)\n', (2075, 2085), False, 'from sqlalchemy.ext import compiler\n'), ((948, 997), 'sqlalchemy.ext.compiler.sql_compiler.process', 'compiler.sql_compiler.process', (['element.selectable'], {}), '(element.selectable)\n', (977, 997), False, 'from sqlalchemy.ext import compiler\n'), ((1652, 1701), 'sqlalchemy.ext.compiler.sql_compiler.process', 'compiler.sql_compiler.process', (['element.selectable'], {}), '(element.selectable)\n', (1681, 1701), False, 'from sqlalchemy.ext import compiler\n'), ((2350, 2361), 'sqlalchemy.sql.table', 'table', (['name'], {}), '(name)\n', (2355, 2361), False, 'from sqlalchemy.sql import table\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'eugene'
'''
MIT License
Copyright (c) 2015 <NAME> (email : <EMAIL>)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
Step 1) Load template files to memory
Step 2) Search and replace these tags in memory (including filenames).
<<<NAMESPACE>>>
<<<STATEMACHINENAME>>> or <<<CLASSNAME>>>
<<<AUTHOR>>>
Step 3) Search for the following pairs of tags
<<<PER_STATE_BEGIN>>>
<<<PER_STATE_END>>>
<<<PER_EVENT_BEGIN>>>
<<<PER_EVENT_END>>>
<<<PER_ACTION_BEGIN>>>
<<<PER_ACTION_END>>>
<<<PER_ACTION_SIGNATURE_BEGIN>>>
<<<PER_ACTION_SIGNATURE_END>>>
<<<PER_GUARD_BEGIN>>>
<<<PER_GUARD_END>>>
and duplicate the following for each item, replacing each tag with the item name
<<<STATENAME>>>
<<<EVENTNAME>>>
<<<ACTIONNAME>>>
<<<GUARDNAME>>>
These need to be expanded for event structs
<<<EVENTSIGNATURE>>>
<<<EVENTMEMBERSINSTANTIATE>>>
<<<EVENTMEMBERSDECLARE>>>
When looping <<<ALPH>>> should increment from a through Z.
When looping <<<NUM>>> should increment from 1 through 10000.
When reading the transition table, first state name (top, left) should be set to the value for this tag : <<<STATE_0>>>
Then, the transition table needs to go here, following the rules.
<<<TTT_BEGIN>>>
<<<TTT_END>>>
or
<<<TTT_LITE_BEGIN>>>
<<<TTT_LITE_END>>>
or
<<<TTT_LITE_SML_BEGIN>>>
<<<TTT_LITE_SML_END>>>
# EMBEDDED SM SUPPORT.
Step 4) In each <<PER_XXX tag, there might be more expansion required. The following tags apply in this pass
<<<PER_EVENT_CURRENT_NEXT_STATE_BEGIN>>>
<<<PER_EVENT_NEXT_STATE_END>>>
and the following replacement tags will be correctly set
<<<EVENTSTATECURRENT>>>
<<<EVENTSTATENEXT>>>
Also, the original SM only allows a single state-based action to happen.
I want there to be several actions allowed in a State, based on several events valid in that state.
These tags provide for that.
<<<PER_STATE_ACTION_EVENT_BEGIN>>>
<<<PER_STATE_ACTION_EVENT_END>>>
and the following replacement tags will be correctly set
<<<PER_STATE_ACTION>>>
<<<PER_STATE_EVENT>>>
# END EMBEDDED SM SUPPORT.
'''
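# Illustrative example (not part of the original specification; all names below are
# hypothetical). A transition table row is [start state, event, next state, action, guard],
# so a table such as
#
#   [['StateIdle',    'EvStart', 'StateRunning', 'ActOnStart', 'None'],
#    ['StateRunning', 'EvStop',  'StateIdle',    'ActOnStop',  'None']]
#
# would duplicate a template snippet between <<<PER_STATE_BEGIN>>> and <<<PER_STATE_END>>>
# once for 'StateIdle' and once for 'StateRunning' (with <<<STATENAME>>> replaced each time),
# and <<<STATE_0>>> would expand to 'StateIdle', the top-left entry of the table.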
__TAG_AUTHOR__ = '<<<AUTHOR>>>'
__TAG_GROUP__ = '<<<GROUP>>>'
__TAG_BRIEF__ = '<<<BRIEF>>>'
__TAG_NAMESPACE__ = '<<<NAMESPACE>>>'
__TAG_SM_NAME__ = '<<<STATEMACHINENAME>>>'
__TAG_SM_NAME_UPPER__ = '<<<STATEMACHINENAMEUPPER>>>'
__TAG_CLASS_NAME__ = '<<<CLASSNAME>>>'
__TAG_PyIFGen_NAME__ = '<<<PYIFGENNAME>>>'
__TAG_PS_BEGIN__ = "<<<PER_STATE_BEGIN>>>"
__TAG_PS_END__ = "<<<PER_STATE_END>>>"
__TAG_PE_BEGIN__ = "<<<PER_EVENT_BEGIN>>>"
__TAG_PE_END__ = "<<<PER_EVENT_END>>>"
__TAG_PA_BEGIN__ = "<<<PER_ACTION_BEGIN>>>"
__TAG_PA_END__ = "<<<PER_ACTION_END>>>"
__TAG_PASIG_BEGIN__ = "<<<PER_ACTION_SIGNATURE_BEGIN>>>"
__TAG_PASIG_END__ = "<<<PER_ACTION_SIGNATURE_END>>>"
__TAG_PG_BEGIN__ = "<<<PER_GUARD_BEGIN>>>"
__TAG_PG_END__ = "<<<PER_GUARD_END>>>"
__TAG_EVENT_SIGNATURE__ = "<<<EVENTSIGNATURE>>>"
__TAG_EVENT_MEMBERINST__ = "<<<EVENTMEMBERSINSTANTIATE>>>"
__TAG_LITE_EVENT_MEMBERINST__ = "<<<EVENTMEMBERSLITEINSTANTIATE>>>"
__TAG_EVENT_MEMBERDECL__ = "<<<EVENTMEMBERSDECLARE>>>"
__TAG_STATENAME__ = '<<<STATENAME>>>'
__TAG_EVENTNAME__ = '<<<EVENTNAME>>>'
__TAG_EVENTNAME_SMALL_CAMEL__ = '<<<EVENTNAMESMALLCAMEL>>>'
__TAG_ACTIONNAME__ = '<<<ACTIONNAME>>>'
__TAG_GUARDNAME__ = '<<<GUARDNAME>>>'
__TAG_ABC__ = '<<<ALPH>>>'
__TAG_123__ = '<<<NUM>>>'
__TAG_INIT_STATE__ = '<<<STATE_0>>>'
__TAG_TTT_BEGIN__ = '<<<TTT_BEGIN>>>'
__TAG_TTT_END___ = '<<<TTT_END>>>'
__TAG_TTT_LITE_BEGIN__ = '<<<TTT_LITE_BEGIN>>>'
__TAG_TTT_LITE_END__ = '<<<TTT_LITE_END>>>'
__TAG_TTT_LITE_SML_BEGIN__ = '<<<TTT_LITE_SML_BEGIN>>>'
__TAG_TTT_LITE_SML_END__ = '<<<TTT_LITE_SML_END>>>'
__TAG_DECLSPEC_DLL_EXPORT__ = "<<<DLL_EXPORT>>>"
# EMBEDDED SM SUPPORT.
__TAG_EVENT_CURNEX_ST_BEG__ = "<<<PER_EVENT_CURRENT_NEXT_STATE_BEGIN>>>"
__TAG_EVENT_CURNEX_ST_END__ = "<<<PER_EVENT_NEXT_STATE_END>>>"
__TAG_EVENT_ST_CUR__ = "<<<EVENTSTATECURRENT>>>"
__TAG_EVENT_ST_NXT__ = "<<<EVENTSTATENEXT>>>"
__TAG_PSAE_BEGIN__ = "<<<PER_STATE_ACTION_EVENT_BEGIN>>>"
__TAG_PSAE_END__ = "<<<PER_STATE_ACTION_EVENT_END>>>"
__TAG_PSAE_ACTION__ = "<<<PER_STATE_ACTION>>>"
__TAG_PSAE_EVENT__ = "<<<PER_STATE_EVENT>>>"
# END EMBEDDED SM SUPPORT.
# Python2 -> 3 shenanigans... try to support both
try:
from interface_base import * # py2
except (ModuleNotFoundError, ImportError) as e:
from .interface_base import * # py3
try:
from .preservative import *
except (ModuleNotFoundError, ImportError) as e:
from preservative import *
try:
from .cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case
except (ModuleNotFoundError, ImportError) as e:
from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case
try:
from LanguageCPP import LanguageCPP
except (ModuleNotFoundError, ImportError) as e:
from .LanguageCPP import LanguageCPP
# Model that describes a state machine.
class CStateMachineModel:
def __init__(self):
self.statemachinename = ""
self.namespacename = ""
self.declspecdllexport = ""
self.pythoninterfacegeneratorfilename = ""
self.states = []
self.actions = []
self.events = []
self.guards = []
# EMBEDDED SM SUPPORT.
self.event_transitions_per_state = {} # ['event', ['next state,current state' , ...]]
self.actionevents_per_state = {} # ['state', [['event', 'action'] , ...]
# END EMBEDDED SM SUPPORT.
self.actionsignatures = OrderedDict()
# Transition Table Model uses State Machine Model to generate all code required for a working state machine.
class CTransitionTableModel(CStateMachineModel):
START_STATE = 0
EVENT = 1
NEXT_STATE = 2
ACTION = 3
GUARD = 4
def __init__(self, tt, nn, smn, dclspc = ""):
CStateMachineModel.__init__(self)
self.transition_table = tt
self.statemachinename = smn
self.namespacename = nn
self.declspecdllexport = dclspc
tstate = OrderedDict()
taction = OrderedDict()
tevent = OrderedDict()
tguard = OrderedDict()
# EMBEDDED SM SUPPORT. ['current state, event', 'next state']
tevent_transitions_tmp = {}
# END EMBEDDED SM SUPPORT.
# Filter
for tableline in self.transition_table:
if tableline[self.START_STATE] != "" and tableline[self.START_STATE].lower() != "none":
tstate[tableline[self.START_STATE]] = 0
if tableline[self.NEXT_STATE] != "" and tableline[self.NEXT_STATE].lower() != "none":
tstate[tableline[self.NEXT_STATE]] = 0
if tableline[self.EVENT] != "" and tableline[self.EVENT].lower() != "none":
tevent[tableline[self.EVENT]] = 0
# EMBEDDED SM SUPPORT. ['current state, event', 'next state']
'''
if tableline[self.NEXT_STATE] == "" or tableline[self.NEXT_STATE].lower() == "none":
raise Exception('Events that dont change state should re-enter the current state.\nPlease fix your transition table')
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.NEXT_STATE]
TODO : For the case below, how to support a different 'action' on the in-state-event???? Ie that event might have gotten the machine
to this state with a particular action, but perhaps the user has configured a different action for this event in-state???
'''
if tableline[self.NEXT_STATE] == "" or tableline[self.NEXT_STATE].lower() == "none":
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.START_STATE]
else:
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.NEXT_STATE]
# This is for in-state-actions based on events...
if tableline[self.ACTION] != "" and tableline[self.ACTION].lower() != "none":
if not (tableline[self.START_STATE] in self.actionevents_per_state):
self.actionevents_per_state[tableline[self.START_STATE]] = []
self.actionevents_per_state[tableline[self.START_STATE]].append([tableline[self.EVENT], tableline[self.ACTION]])
# END EMBEDDED SM SUPPORT.
if tableline[self.ACTION] != "" and tableline[self.ACTION].lower() != "none":
taction[tableline[self.ACTION]] = 0
if not ((tableline[self.ACTION] + tableline[self.EVENT]) in self.actionsignatures):
self.actionsignatures[tableline[self.ACTION] + tableline[self.EVENT]] = (tableline[self.ACTION], tableline[self.EVENT]) #, tableline[self.START_STATE],tableline[self.NEXT_STATE]))
if tableline[self.GUARD] != "" and tableline[self.GUARD].lower() != "none":
tguard[tableline[self.GUARD]] = 0
# Populate CStateMachineModel
for s in tstate:
self.states.append(s)
for e in tevent:
self.events.append(e)
for a in taction:
self.actions.append(a)
for g in tguard:
self.guards.append(g)
# EMBEDDED SM SUPPORT.
for e in tevent:
self.event_transitions_per_state[e] = []
for s in tstate:
key = s+','+e
if key in tevent_transitions_tmp:
self.event_transitions_per_state[e].append([tevent_transitions_tmp[key], s])
else:
self.event_transitions_per_state[e].append(['EVENT_IGNORED', s])
# END EMBEDDED SM SUPPORT.
def __getfirststate__(self):
if not self.transition_table:
return "NO TT PRESENT!"
return self.transition_table[0][0]
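# Illustrative sketch (not part of the original file) of what CTransitionTableModel derives
# from a small table; all names are hypothetical:
#
#   tt = [['StateIdle',    'EvStart', 'StateRunning', 'ActOnStart', 'None'],
#         ['StateRunning', 'EvStop',  'StateIdle',    'ActOnStop',  'None']]
#   model = CTransitionTableModel(tt, 'myns', 'MyMachine')
#   # model.states  -> ['StateIdle', 'StateRunning']
#   # model.events  -> ['EvStart', 'EvStop']
#   # model.actions -> ['ActOnStart', 'ActOnStop']
#   # model.event_transitions_per_state['EvStart']
#   #   -> [['StateRunning', 'StateIdle'], ['EVENT_IGNORED', 'StateRunning']]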
class CStateMachineGenerator(CBASEGenerator):
def __init__(self, inputfiledir, outputfiledir, events_interface=None, language=None, author='Anonymous', group='', brief=''):
CBASEGenerator.__init__(self,inputfiledir,outputfiledir,language, author, group, brief)
self.events_interface = events_interface
def __loadtemplates_firstfiltering__(self, smmodel):
"""
        See the base-class implementation. This just prepares the dictionary of things to replace
        for this type of code generation.
@param smmodel:
@return: cgen.CCodeModel, a dictionary -> {filename,[lines]}
"""
dict_to_replace_lines = {}
dict_to_replace_lines[__TAG_SM_NAME_UPPER__] = caps(smmodel.statemachinename)
dict_to_replace_lines[__TAG_SM_NAME__] = smmodel.statemachinename
dict_to_replace_lines[__TAG_CLASS_NAME__] = smmodel.statemachinename
dict_to_replace_lines[__TAG_PyIFGen_NAME__] = smmodel.pythoninterfacegeneratorfilename.replace('.py', '') # hack : for tcpgen simple templates,
if not dict_to_replace_lines[__TAG_PyIFGen_NAME__]:
dict_to_replace_lines[__TAG_PyIFGen_NAME__] = self.vpp_filename
dict_to_replace_lines[__TAG_NAMESPACE__] = smmodel.namespacename
dict_to_replace_lines[__TAG_AUTHOR__] = self.author
dict_to_replace_lines[__TAG_GROUP__] = self.group
dict_to_replace_lines[__TAG_BRIEF__] = self.brief
dict_to_replace_lines[__TAG_DECLSPEC_DLL_EXPORT__] = smmodel.declspecdllexport
dict_to_replace_filenames = {}
dict_to_replace_filenames["TEMPLATE_"] = smmodel.statemachinename
#dict_to_replace_filenames['.ty'] = '.py'
#dict_to_replace_filenames['.t#'] = '.cs'
#dict_to_replace_filenames['.t'] = '.h'
#dict_to_replace_filenames['.hpp'] = '.cpp' # there are no '.hpp' templates...but search and replace will apply '.t -> .h' first so '.tpp' becomes '.hpp'...grrr
return CBASEGenerator.__loadtemplates_firstfiltering__(self,dict_to_replace_lines,dict_to_replace_filenames)
def __get_event_signature__(self,name):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
return self.language.ParameterString(self.language.GetFactoryCreateParams(s, self.events_interface))
return ""
def __instantiate_event_struct_member(self, name, whitespace_cnt, is_ptr=True, instancename="data"):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
guts = self.language.InstantiateStructMembers(s, self.events_interface, '', instancename, self.language.Accessor(is_ptr))
result = ''
cnt = 0
for g in guts:
result = result + (whitespace_cnt*' ' if cnt > 0 else '') + g + '\n'
cnt = cnt + 1
return result
return ""
def __declare_event_struct_members(self, name, whitespace_cnt):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
guts = self.language.DeclareStructMembers(s, self.events_interface, '', False)
result = ''
cnt = 0
for g in guts:
result = result + ((whitespace_cnt+1)*' ' if cnt > 0 else ' ') + g + '\n'
cnt = cnt + 1
# remove last '\n'
result = result[:-1]
return result
return ""
def hasTag(self, line, tag):
return line.find(tag.replace("<<<", "").replace(">>>", "")) > 0
def hasMemberName(self, a):
return a.find("::") > 0
def extractMemberNameAndTag(self, a):
member = a[a.find("::"):a.find(">>>")].replace("::", "")
tag = a.strip()
return [tag, member]
def __innerexpand__secondfiltering__(self, names2x, lines2x, puthere):
global alpha
__resetalphabet__()
cnt = 0
for name in names2x:
for line in lines2x:
newline = line
newline = newline.replace(__TAG_STATENAME__, name)
newline = newline.replace(__TAG_EVENTNAME_SMALL_CAMEL__, camel_case_small(name))
newline = newline.replace(__TAG_EVENTNAME__, name)
newline = newline.replace(__TAG_ACTIONNAME__, name)
newline = newline.replace(__TAG_GUARDNAME__, name)
newline = newline.replace(__TAG_ABC__, chr(alpha))
newline = newline.replace(__TAG_123__, str(cnt))
# EMBEDDED SM SUPPORT.
newline = newline.replace(__TAG_EVENT_CURNEX_ST_BEG__, __TAG_EVENT_CURNEX_ST_BEG__ + '<<<' + name + '>>>') # put a marker (event name) for mapping
newline = newline.replace(__TAG_PSAE_BEGIN__, __TAG_PSAE_BEGIN__ + '<<<' + name + '>>>') # put a marker (state name) for mapping
# END EMBEDDED SM SUPPORT.
tabcnt = newline.count(' ')
newline = newline.replace(__TAG_EVENT_SIGNATURE__, self.__get_event_signature__(name))
# __TAG_EVENT_MEMBERINST__ -> PTR
if self.hasTag(newline,__TAG_EVENT_MEMBERINST__) and self.hasMemberName(newline):
line_member = self.extractMemberNameAndTag(newline)
newline = newline.replace(line_member[0],self.__instantiate_event_struct_member(name, tabcnt, True, line_member[1]))
else:
newline = newline.replace(__TAG_EVENT_MEMBERINST__, self.__instantiate_event_struct_member(name, tabcnt, True)) # PTR
# __TAG_LITE_EVENT_MEMBERINST__ -> NO PTR
if self.hasTag(newline,__TAG_LITE_EVENT_MEMBERINST__) and self.hasMemberName(newline):
line_member = self.extractMemberNameAndTag(newline)
newline = newline.replace(line_member[0],self.__instantiate_event_struct_member(name, tabcnt, False, line_member[1]))
else:
newline = newline.replace(__TAG_LITE_EVENT_MEMBERINST__, self.__instantiate_event_struct_member(name, tabcnt, False)) # NO PTR
newline = newline.replace(__TAG_EVENT_MEMBERDECL__, self.__declare_event_struct_members(name, tabcnt))
# END EMBEDDED SUPPORT
puthere.append(newline)
cnt = cnt + 1
__getnextalphabet__()
def __innerexpand_actionsignatures__(self, states2x, lines2x, puthere):
global alpha
__resetalphabet__()
cnt = 0
for key, (actionname, eventname) in states2x.items():
if eventname == "" or eventname.lower() == 'none':
eventname = "NONE"
elif eventname.lower() == 'any':
eventname = "ANY"
for line in lines2x:
puthere.append(line
.replace(__TAG_ACTIONNAME__, actionname)
.replace(__TAG_EVENTNAME_SMALL_CAMEL__, camel_case_small(eventname))
.replace(__TAG_EVENTNAME__, eventname)
.replace(__TAG_ABC__, chr(alpha))
.replace(__TAG_123__, str(cnt)))
cnt = cnt + 1
__getnextalphabet__()
def __transitiontable_replace_NONE__(self, val):
if val == "" or val.lower() == 'none':
val = "msmf::none"
return val
def __transitiontableLITE_guard_replace_NONE__(self, val):
tmp_val = val.replace('__', '')
if tmp_val == "" or tmp_val.lower() == 'none':
val = "boost::msm::gnone"
return val
def __transitiontableLITE_action_replace_NONE__(self, val):
tmp_val = val.replace('__', '')
if tmp_val == "" or tmp_val.lower() == 'none' or tmp_val.lower().find('::none<') > -1:
val = "boost::msm::none"
return val
    ''' This SM doesn't seem to allow 'none' transitions -> make it transition to the source state'''
def __transitiontableLITE_nextstate_replace_NONE__(self, val, source_state):
tmp_val = val.replace('__', '')
tmp_val = tmp_val.replace('msmf::', '')
if tmp_val == "" or tmp_val.lower() == 'none':
val = source_state
return val
def __expand_secondfiltering__(self, smmodel, cmmodel):
for file in cmmodel.filenames_to_lines:
ex_state = False
ex_event = False
ex_action = False
ex_actionsig = False
ex_guard = False
ex_tt = False
ex_tt_lite = False
ex_tt_lite_sml = False
snipped_to_expand = []
alllinesexpanded = []
for line in cmmodel.filenames_to_lines[file]:
begin = line.find(__TAG_PS_BEGIN__) > -1 or \
line.find(__TAG_PE_BEGIN__) > -1 or \
line.find(__TAG_PA_BEGIN__) > -1 or \
line.find(__TAG_PASIG_BEGIN__) > -1 or \
line.find(__TAG_PG_BEGIN__) > -1 or \
line.find(__TAG_TTT_BEGIN__) > -1 or \
line.find(__TAG_TTT_LITE_BEGIN__) > -1 or \
line.find(__TAG_TTT_LITE_SML_BEGIN__) > -1
ex_state = line.find(__TAG_PS_BEGIN__) > -1 or ex_state
ex_event = line.find(__TAG_PE_BEGIN__) > -1 or ex_event
ex_action = line.find(__TAG_PA_BEGIN__) > -1 or ex_action
ex_actionsig = line.find(__TAG_PASIG_BEGIN__) > -1 or ex_actionsig
ex_guard = line.find(__TAG_PG_BEGIN__) > -1 or ex_guard
ex_tt = line.find(__TAG_TTT_BEGIN__) > -1 or ex_tt
ex_tt_lite = line.find(__TAG_TTT_LITE_BEGIN__) > -1 or ex_tt_lite
ex_tt_lite_sml = line.find(__TAG_TTT_LITE_SML_BEGIN__) > -1 or ex_tt_lite_sml
if not ex_state and not ex_event and not ex_action and not ex_actionsig and not ex_guard and not ex_tt and not ex_tt_lite and not ex_tt_lite_sml:
alllinesexpanded.append(line.replace(__TAG_INIT_STATE__, smmodel.__getfirststate__()))
if ex_state and line.find(__TAG_PS_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.states, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_state = False
if ex_event and line.find(__TAG_PE_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.events, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_event = False
if ex_action and line.find(__TAG_PA_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.actions, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_action = False
if ex_actionsig and line.find(__TAG_PASIG_END__) > -1:
self.__innerexpand_actionsignatures__(smmodel.actionsignatures, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_actionsig = False
if ex_guard and line.find(__TAG_PG_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.guards, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_guard = False
if ex_tt and line.find(__TAG_TTT_END___) > -1:
len_tt = len(smmodel.transition_table)
tt_out = " // " + len("msmf::Row < ") * ' ' + even_space("Start") + even_space("Event") + even_space("Next") + even_space("Action") + even_space("Guard") + '\n'
for i, ttline in enumerate(smmodel.transition_table):
tt_out += ' msmf::Row < '
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.START_STATE])) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.EVENT] )) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.NEXT_STATE] )) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.ACTION] )) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.GUARD] )) + '> '
if i != len_tt-1:
tt_out += ","
tt_out += " // " + str(i) + '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
ex_tt = False
if ex_tt_lite and line.find(__TAG_TTT_LITE_END__) > -1:
tt_out = " // " + even_space("Start + ") + even_space("Event") + even_space("[ Guard ] ") + even_space("/ Action") + even_space(" = Next") + '\n'
startStateHasEntryExit = {}
for i, ttline in enumerate(smmodel.transition_table):
if i == 0: # initial state
tt_out += " *"
else:
tt_out += " , "
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.START_STATE])) + '+'
tt_out += even_space('event<' + self.__transitiontable_replace_NONE__(ttline[smmodel.EVENT]) + ">") + ' '
tt_out += even_space('['+self.__transitiontableLITE_guard_replace_NONE__('__'+ttline[smmodel.GUARD])+']') + ' / '
tt_out += even_space(self.__transitiontableLITE_action_replace_NONE__('__'+ttline[smmodel.ACTION]))
if ttline[smmodel.NEXT_STATE].lower() != 'none': # to not get transitions into/outof state on actions that dont change the state...
tt_out += ' = ' + even_space(self.__transitiontableLITE_nextstate_replace_NONE__(ttline[smmodel.NEXT_STATE], ttline[smmodel.START_STATE]))
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
# State entry/exit, once only
if not (ttline[smmodel.START_STATE] in startStateHasEntryExit):
startStateHasEntryExit[ttline[smmodel.START_STATE]] = True
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_entry / __" + ttline[smmodel.START_STATE] + 'OnEntry\n'
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_exit / __" + ttline[smmodel.START_STATE] + 'OnExit'
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
ex_tt_lite = False
if ex_tt_lite_sml and line.find(__TAG_TTT_LITE_SML_END__) > -1:
tt_out = " // " + even_space("Start + ") + even_space("Event") + even_space("[ Guard ] ") + even_space("/ Action", 100) + even_space(" = Next") + '\n'
startStateHasEntryExit = {}
for i, ttline in enumerate(smmodel.transition_table):
if i == 0: # initial state
tt_out += " *"
else:
tt_out += " , "
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.START_STATE])) + '+'
tt_out += even_space('event<' + self.__transitiontable_replace_NONE__(ttline[smmodel.EVENT]) + ">") + ' '
tt_out += even_space('['+self.__transitiontableLITE_guard_replace_NONE__('__'+ttline[smmodel.GUARD])+']') + ' / '
#tt_out += even_space(self.__transitiontableLITE_action_replace_NONE__('call(this,&CONCRETE::' + ttline[smmodel.ACTION] + '<' + ttline[smmodel.EVENT] + ">)"), 100)
tt_out += even_space(self.__transitiontableLITE_action_replace_NONE__('__' + ttline[smmodel.ACTION]), 100)
if ttline[smmodel.NEXT_STATE].lower() != 'none': # to not get transitions into/outof state on actions that dont change the state...
tt_out += ' = ' + even_space(self.__transitiontableLITE_nextstate_replace_NONE__(ttline[smmodel.NEXT_STATE], ttline[smmodel.START_STATE]))
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
# State entry/exit, once only
if not (ttline[smmodel.START_STATE] in startStateHasEntryExit):
startStateHasEntryExit[ttline[smmodel.START_STATE]] = True
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_entry<_> / __" + ttline[smmodel.START_STATE] + 'OnEntry\n'
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_exit<_> / __" + ttline[smmodel.START_STATE] + 'OnExit'
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
ex_tt_lite_sml = False
if (ex_state or ex_event or ex_action or ex_actionsig or ex_guard or ex_tt or ex_tt_lite or ex_tt_lite_sml) and not begin:
snipped_to_expand.append(line)
cmmodel.filenames_to_lines[file] = alllinesexpanded
# EMBEDDED SM SUPPORT.
def __innerexpand__thirdfiltering__eventtransitionsperstate(self, namesmap3x, lines3x, puthere):
global alpha
__resetalphabet__()
cnt = 0
# First find the mapping marker
for _map in namesmap3x:
currentstate = _map[1]
nextstate = _map[0]
for line in lines3x:
#puthere.append(line.replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
puthere.append(line.replace(__TAG_EVENT_ST_CUR__, currentstate).replace(__TAG_EVENT_ST_NXT__, nextstate).replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
cnt = cnt + 1
__getnextalphabet__()
# this function is pretty much the same as the one above...
def __innerexpand__thirdfiltering__eventactionsperstate(self, namesmap3x, lines3x, puthere):
global alpha
__resetalphabet__()
cnt = 0
# First find the mapping marker
for _map in namesmap3x:
action = _map[1]
event = _map[0]
for line in lines3x:
# puthere.append(line.replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
puthere.append(line.replace(__TAG_PSAE_ACTION__, action).replace(__TAG_PSAE_EVENT__, event).replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
cnt = cnt + 1
__getnextalphabet__()
def __expand_thirdfiltering__(self, smmodel, cmmodel):
for file in cmmodel.filenames_to_lines:
ex_state = False
ex_event = False
#ex_action = False
#ex_guard = False
snippet_to_expand = []
alllinesexpanded = []
state_action_map = ''
event_map = ''
for line in cmmodel.filenames_to_lines[file]:
begin = line.find(__TAG_EVENT_CURNEX_ST_BEG__) > -1 or line.find(__TAG_PSAE_BEGIN__) > -1 #or line.find(__TAG_PA_BEGIN__) > -1 or line.find(__TAG_PG_BEGIN__) > -1
if begin:
event_map = line.replace(__TAG_EVENT_CURNEX_ST_BEG__, '').replace('<<<', '').replace('>>>', '').replace('\t', '').replace('\n', '').replace(" ","")
state_action_map = line.replace(__TAG_PSAE_BEGIN__, '').replace('<<<', '').replace('>>>', '').replace('\t', '').replace('\n', '').replace(" ","")
end_event = (line.find(__TAG_EVENT_CURNEX_ST_END__) > -1)
end_state = (line.find(__TAG_PSAE_END__) > -1)
ex_state = line.find(__TAG_PSAE_BEGIN__) > -1 or ex_state
ex_event = line.find(__TAG_EVENT_CURNEX_ST_BEG__) > -1 or ex_event
#ex_action = line.find(__TAG_PA_BEGIN__) > -1 or ex_action
#ex_guard = line.find(__TAG_PG_BEGIN__) > -1 or ex_guard
#if not ex_state and not ex_event and not ex_action and not ex_guard:
# alllinesexpanded.append(line.replace(__TAG_INIT_STATE__, smmodel.__getfirststate__()))
if ex_state and line.find(__TAG_PSAE_END__) > -1:
if state_action_map in smmodel.actionevents_per_state:
self.__innerexpand__thirdfiltering__eventactionsperstate(smmodel.actionevents_per_state[state_action_map], snippet_to_expand, alllinesexpanded)
snippet_to_expand = []
ex_state = False
if ex_event and line.find(__TAG_EVENT_CURNEX_ST_END__) > -1:
self.__innerexpand__thirdfiltering__eventtransitionsperstate(smmodel.event_transitions_per_state[event_map], snippet_to_expand, alllinesexpanded)
snippet_to_expand = []
ex_event = False
#if ex_action and line.find(__TAG_PA_END__) > -1:
# self.__innerexpand__thirdfiltering__(smmodel.actions, snippet_to_expand, alllinesexpanded)
# snippet_to_expand = []
# ex_action = False
#if ex_guard and line.find(__TAG_PG_END__) > -1:
# self.__innerexpand__thirdfiltering__(smmodel.guards, snippet_to_expand, alllinesexpanded)
# snippet_to_expand = []
# ex_guard = False
#if (ex_state or ex_event or ex_action or ex_guard) and not begin:
if (ex_event or ex_state) and not begin:
snippet_to_expand.append(line)
elif not begin and not end_event and not end_state: # Unlike the second pass, this needs to preserve what was done there...
alllinesexpanded.append(line)
cmmodel.filenames_to_lines[file] = alllinesexpanded
# END EMBEDDED SM SUPPORT.
''' Used for State Machine Generation
'''
def Generate(self, transitiontable, namespacenname, statemachinename, dclspc="", copyotherfiles = True):
print("*************************************")
print("******* SMGen ***********************")
print("*************************************")
print(" Output Dir : " + self.output_gen_file_dir)
print(" State Machine: " + statemachinename)
print(" Executing in : " + os.path.realpath(__file__))
print("*************************************")
sm = CTransitionTableModel(transitiontable, namespacenname, statemachinename, dclspc)
cm = self.__loadtemplates_firstfiltering__(sm)
self.__expand_secondfiltering__(sm, cm)
# EMBEDDED SM SUPPORT.
self.__expand_thirdfiltering__(sm, cm)
# END EMBEDDED SM SUPPORT.
# Preserve user tags.
self.__preserve_usertags_in_files__(cm)
'''
# Round-trip Code Preservation. Will load the code to preserve upon creation (if the output dir is not-empty/the same as the one in the compile path).
preservation = Preservative(self.output_gen_file_dir)
preservation.Emplace(cm.filenames_to_lines)
'''
# Write output to file.
self.__createoutput__(cm.filenames_to_lines)
# Copy non-autogenerated required files to output.
if isinstance(self.language, LanguageCPP) and copyotherfiles:
# Files...
files_to_copy = []
files_to_copy.append("allocator.h")
files_to_copy.append("allocator.cpp")
files_to_copy.append("basetypes.h")
files_to_copy.append("CMakeLists.txt")
files_to_copy.append("Fault.h")
files_to_copy.append("Fault.cpp")
files_to_copy.append("stl_allocator.h")
files_to_copy.append("thread_FreeRTOS.h")
files_to_copy.append("thread_FreeRTOS.cpp")
files_to_copy.append("threaded_dispatcher.h")
files_to_copy.append("threaded_dispatcher_FreeRTOS.h")
files_to_copy.append("threadsafe_queue.h")
files_to_copy.append("threadsafe_queue_FreeRTOS.h")
files_to_copy.append("waitcondition.h")
files_to_copy.append("waitcondition.cpp")
files_to_copy.append("xallocator.h")
files_to_copy.append("xallocator.cpp")
files_to_copy.append("xlist.h")
files_to_copy.append("xmap.h")
files_to_copy.append("xqueue.h")
files_to_copy.append("xset.h")
files_to_copy.append("xsstream.h")
files_to_copy.append("xstring.h")
allplatformsfrom = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.join("allplatforms", "CPP"))
allplatformsto = os.path.join(os.path.abspath(self.output_gen_file_dir), "allplatforms")
FileCopyUtil(allplatformsfrom, allplatformsto, files_to_copy)
# Boost SML ...
smlfrom = os.path.join(allplatformsfrom, os.path.join("sml", os.path.join("include","boost")))
smlto = os.path.join(allplatformsto, "boost")
smlfiles_to_copy = []
smlfiles_to_copy.append("sml.hpp")
FileCopyUtil(smlfrom, smlto, smlfiles_to_copy)
# Tests...
testfiles_to_copy = []
testfiles_to_copy.append("CMakeLists.txt")
testfiles_to_copy.append("Test.ThreadingConcepts.cpp")
testfiles_to_copy.append("test_main.cpp")
tests_allplatformsfrom = os.path.join(allplatformsfrom, "testsuite")
tests_allplatformsto = os.path.join(allplatformsto, "testsuite")
FileCopyUtil(tests_allplatformsfrom, tests_allplatformsto, testfiles_to_copy)
# Micro Unit Test Framework
microunit_files_to_copy = []
microunit_files_to_copy.append("minunit.h")
microunit_files_to_copy.append("minunit.cpp")
microunit_allplatformsfrom = os.path.join(tests_allplatformsfrom, "minunit")
microunit_allplatformsto = os.path.join(tests_allplatformsto, "minunit")
FileCopyUtil(microunit_allplatformsfrom, microunit_allplatformsto, microunit_files_to_copy)
''' Used for Protocol Generation
'''
def GenerateProtocol(self, pythoninterfacegeneratorfilename, namespacenname, classname, dclspc="", preserve_dir=""):
sm = CTransitionTableModel([], namespacenname, classname, dclspc)
sm.pythoninterfacegeneratorfilename = pythoninterfacegeneratorfilename
cm = self.__loadtemplates_firstfiltering__(sm)
self.__expand_secondfiltering__(sm, cm)
# Round-trip Code Preservation. Will load the code to preserve upon creation (if the output dir is not-empty/the same as the one in the compile path).
        # TCP gen might have a different output directory (typically COG will put files into an intermediate dir, and then copy them elsewhere)
preservation = None
if preserve_dir == "":
preservation = Preservative(self.output_gen_file_dir)
else:
preservation = Preservative(preserve_dir)
preservation.Emplace(cm.filenames_to_lines)
# Write output to file.
self.__createoutput__(cm.filenames_to_lines)
# return the filenames
filenames = []
for filename in cm.filenames_to_lines.keys():
filenames.append(filename)
return filenames
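# Illustrative end-to-end usage sketch (not part of the original file); directory and machine
# names are hypothetical, and passing language=None simply skips the copy of the C++ support
# files at the end of Generate():
#
#   tt = [['StateIdle',    'EvStart', 'StateRunning', 'ActOnStart', 'None'],
#         ['StateRunning', 'EvStop',  'StateIdle',    'ActOnStop',  'None']]
#   gen = CStateMachineGenerator('templates_in', 'generated_out',
#                                events_interface=None, language=None, author='Anonymous')
#   gen.Generate(tt, 'myns', 'MyMachine')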
|
[
"cgen.even_space",
"cgen.FileCopyUtil",
"cgen.CBASEGenerator.__init__",
"cgen.CBASEGenerator.__loadtemplates_firstfiltering__",
"cgen.caps",
"cgen.camel_case_small",
"cgen.__getnextalphabet__",
"cgen.__resetalphabet__"
] |
[((11848, 11942), 'cgen.CBASEGenerator.__init__', 'CBASEGenerator.__init__', (['self', 'inputfiledir', 'outputfiledir', 'language', 'author', 'group', 'brief'], {}), '(self, inputfiledir, outputfiledir, language, author,\n group, brief)\n', (11871, 11942), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((12386, 12416), 'cgen.caps', 'caps', (['smmodel.statemachinename'], {}), '(smmodel.statemachinename)\n', (12390, 12416), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((13644, 13751), 'cgen.CBASEGenerator.__loadtemplates_firstfiltering__', 'CBASEGenerator.__loadtemplates_firstfiltering__', (['self', 'dict_to_replace_lines', 'dict_to_replace_filenames'], {}), '(self, dict_to_replace_lines,\n dict_to_replace_filenames)\n', (13691, 13751), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((15877, 15896), 'cgen.__resetalphabet__', '__resetalphabet__', ([], {}), '()\n', (15894, 15896), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((18475, 18494), 'cgen.__resetalphabet__', '__resetalphabet__', ([], {}), '()\n', (18492, 18494), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((29977, 29996), 'cgen.__resetalphabet__', '__resetalphabet__', ([], {}), '()\n', (29994, 29996), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((30725, 30744), 'cgen.__resetalphabet__', '__resetalphabet__', ([], {}), '()\n', (30742, 30744), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((18347, 18368), 'cgen.__getnextalphabet__', '__getnextalphabet__', ([], {}), '()\n', (18366, 18368), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((19213, 19234), 'cgen.__getnextalphabet__', '__getnextalphabet__', ([], {}), '()\n', (19232, 19234), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((30511, 30532), 'cgen.__getnextalphabet__', '__getnextalphabet__', ([], {}), '()\n', (30530, 30532), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((31237, 31258), 'cgen.__getnextalphabet__', '__getnextalphabet__', ([], {}), '()\n', (31256, 31258), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((37481, 37542), 'cgen.FileCopyUtil', 'FileCopyUtil', (['allplatformsfrom', 'allplatformsto', 'files_to_copy'], {}), '(allplatformsfrom, allplatformsto, files_to_copy)\n', (37493, 37542), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, 
__resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((37830, 37876), 'cgen.FileCopyUtil', 'FileCopyUtil', (['smlfrom', 'smlto', 'smlfiles_to_copy'], {}), '(smlfrom, smlto, smlfiles_to_copy)\n', (37842, 37876), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((38284, 38361), 'cgen.FileCopyUtil', 'FileCopyUtil', (['tests_allplatformsfrom', 'tests_allplatformsto', 'testfiles_to_copy'], {}), '(tests_allplatformsfrom, tests_allplatformsto, testfiles_to_copy)\n', (38296, 38361), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((38746, 38841), 'cgen.FileCopyUtil', 'FileCopyUtil', (['microunit_allplatformsfrom', 'microunit_allplatformsto', 'microunit_files_to_copy'], {}), '(microunit_allplatformsfrom, microunit_allplatformsto,\n microunit_files_to_copy)\n', (38758, 38841), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((16146, 16168), 'cgen.camel_case_small', 'camel_case_small', (['name'], {}), '(name)\n', (16162, 16168), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((23800, 23819), 'cgen.even_space', 'even_space', (['"""Guard"""'], {}), "('Guard')\n", (23810, 23819), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((25052, 25073), 'cgen.even_space', 'even_space', (['""" = Next"""'], {}), "(' = Next')\n", (25062, 25073), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((27324, 27345), 'cgen.even_space', 'even_space', (['""" = Next"""'], {}), "(' = Next')\n", (27334, 27345), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((23777, 23797), 'cgen.even_space', 'even_space', (['"""Action"""'], {}), "('Action')\n", (23787, 23797), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((25027, 25049), 'cgen.even_space', 'even_space', (['"""/ Action"""'], {}), "('/ Action')\n", (25037, 25049), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((27294, 27321), 'cgen.even_space', 'even_space', (['"""/ Action"""', '(100)'], {}), "('/ Action', 100)\n", (27304, 27321), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((23756, 23774), 'cgen.even_space', 'even_space', (['"""Next"""'], {}), "('Next')\n", (23766, 23774), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((25000, 25024), 'cgen.even_space', 'even_space', (['"""[ Guard ] """'], {}), "('[ Guard ] ')\n", (25010, 25024), False, 'from cgen import 
CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((27267, 27291), 'cgen.even_space', 'even_space', (['"""[ Guard ] """'], {}), "('[ Guard ] ')\n", (27277, 27291), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((23734, 23753), 'cgen.even_space', 'even_space', (['"""Event"""'], {}), "('Event')\n", (23744, 23753), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((24978, 24997), 'cgen.even_space', 'even_space', (['"""Event"""'], {}), "('Event')\n", (24988, 24997), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((27245, 27264), 'cgen.even_space', 'even_space', (['"""Event"""'], {}), "('Event')\n", (27255, 27264), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((23712, 23731), 'cgen.even_space', 'even_space', (['"""Start"""'], {}), "('Start')\n", (23722, 23731), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((24953, 24975), 'cgen.even_space', 'even_space', (['"""Start + """'], {}), "('Start + ')\n", (24963, 24975), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((27220, 27242), 'cgen.even_space', 'even_space', (['"""Start + """'], {}), "('Start + ')\n", (27230, 27242), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n'), ((18956, 18983), 'cgen.camel_case_small', 'camel_case_small', (['eventname'], {}), '(eventname)\n', (18972, 18983), False, 'from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case\n')]
|
from flask_mail import Message
from flask import render_template
from flask_start.extensions import mail
'''
from threading import Thread
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
'''
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
mail.send(msg)
#Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('Reset Your Password',
sender='<EMAIL>',
recipients=[user.email],
text_body=render_template('public/reset_password_mail.txt',
user=user, token=token),
html_body=render_template('public/reset_password_mail.html',
user=user, token=token))
|
[
"flask.render_template",
"flask_mail.Message",
"flask_start.extensions.mail.send"
] |
[((305, 359), 'flask_mail.Message', 'Message', (['subject'], {'sender': 'sender', 'recipients': 'recipients'}), '(subject, sender=sender, recipients=recipients)\n', (312, 359), False, 'from flask_mail import Message\n'), ((414, 428), 'flask_start.extensions.mail.send', 'mail.send', (['msg'], {}), '(msg)\n', (423, 428), False, 'from flask_start.extensions import mail\n'), ((710, 783), 'flask.render_template', 'render_template', (['"""public/reset_password_mail.txt"""'], {'user': 'user', 'token': 'token'}), "('public/reset_password_mail.txt', user=user, token=token)\n", (725, 783), False, 'from flask import render_template\n'), ((851, 925), 'flask.render_template', 'render_template', (['"""public/reset_password_mail.html"""'], {'user': 'user', 'token': 'token'}), "('public/reset_password_mail.html', user=user, token=token)\n", (866, 925), False, 'from flask import render_template\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import os
import numpy
from io import BytesIO
from matplotlib import pyplot
import requests
import torch
from PIL import Image
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
from maskrcnn_benchmark.structures.image_list import ImageList
if __name__ == "__main__":
# load config from file and command-line arguments
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cfg.merge_from_file(
os.path.join(project_dir,
"configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"))
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=480,
)
def single_image_to_top_predictions(image):
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
image = image * 255
else:
image = image[[2, 1, 0]]
        # we absolutely want fixed size (int) here (or we run into a tracing error (or bug?),
        # or we might later decide to make things work with variable size...)
image = image - torch.tensor(cfg.INPUT.PIXEL_MEAN)[:, None, None]
# should also do variance...
image_list = ImageList(image.unsqueeze(0), [(int(image.size(-2)), int(image.size(-1)))])
result, = coco_demo.model(image_list)
scores = result.get_field("scores")
keep = (scores >= coco_demo.confidence_threshold)
result = (result.bbox[keep],
result.get_field("labels")[keep],
result.get_field("mask")[keep],
scores[keep])
return result
@torch.jit.script
def my_paste_mask(mask, bbox, height, width, threshold=0.5, padding=1, contour=True, rectangle=False):
# type: (Tensor, Tensor, int, int, float, int, bool, bool) -> Tensor
padded_mask = torch.constant_pad_nd(mask, (padding, padding, padding, padding))
scale = 1.0 + 2.0 * float(padding) / float(mask.size(-1))
center_x = (bbox[2] + bbox[0]) * 0.5
center_y = (bbox[3] + bbox[1]) * 0.5
w_2 = (bbox[2] - bbox[0]) * 0.5 * scale
h_2 = (bbox[3] - bbox[1]) * 0.5 * scale # should have two scales?
bbox_scaled = torch.stack([center_x - w_2, center_y - h_2,
center_x + w_2, center_y + h_2], 0)
TO_REMOVE = 1
w = (bbox_scaled[2] - bbox_scaled[0] + TO_REMOVE).clamp(min=1).long()
h = (bbox_scaled[3] - bbox_scaled[1] + TO_REMOVE).clamp(min=1).long()
scaled_mask = torch.ops.maskrcnn_benchmark.upsample_bilinear(padded_mask.float(), h, w)
x0 = bbox_scaled[0].long()
y0 = bbox_scaled[1].long()
x = x0.clamp(min=0)
y = y0.clamp(min=0)
leftcrop = x - x0
topcrop = y - y0
w = torch.min(w - leftcrop, width - x)
h = torch.min(h - topcrop, height - y)
# mask = torch.zeros((height, width), dtype=torch.uint8)
# mask[y:y + h, x:x + w] = (scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold)
mask = torch.constant_pad_nd((scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold),
(int(x), int(width - x - w), int(y), int(height - y - h))) # int for the script compiler
if contour:
mask = mask.float()
# poor person's contour finding by comparing to smoothed
mask = (mask - torch.nn.functional.conv2d(mask.unsqueeze(0).unsqueeze(0),
torch.full((1, 1, 3, 3), 1.0 / 9.0), padding=1)[0, 0]).abs() > 0.001
if rectangle:
x = torch.arange(width, dtype=torch.long).unsqueeze(0)
y = torch.arange(height, dtype=torch.long).unsqueeze(1)
r = bbox.long()
# work around script not liking bitwise ops
rectangle_mask = ((((x == r[0]) + (x == r[2])) * (y >= r[1]) * (y <= r[3]))
+ (((y == r[1]) + (y == r[3])) * (x >= r[0]) * (x <= r[2])))
mask = (mask + rectangle_mask).clamp(max=1)
return mask
@torch.jit.script
def add_annotations(image, labels, scores, bboxes, class_names=','.join(coco_demo.CATEGORIES), color=torch.tensor([255, 255, 255], dtype=torch.long)):
# type: (Tensor, Tensor, Tensor, Tensor, str, Tensor) -> Tensor
result_image = torch.ops.maskrcnn_benchmark.add_annotations(image, labels, scores, bboxes, class_names, color)
return result_image
@torch.jit.script
def combine_masks(image, labels, masks, scores, bboxes, threshold=0.5, padding=1, contour=True, rectangle=False, palette=torch.tensor([33554431, 32767, 2097151])):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, float, int, bool, bool, Tensor) -> Tensor
height = image.size(0)
width = image.size(1)
image_with_mask = image.clone()
for i in range(masks.size(0)):
color = ((palette * labels[i]) % 255).to(torch.uint8)
one_mask = my_paste_mask(masks[i, 0], bboxes[i], height, width, threshold, padding, contour, rectangle)
image_with_mask = torch.where(one_mask.unsqueeze(-1), color.unsqueeze(0).unsqueeze(0), image_with_mask)
image_with_mask = add_annotations(image_with_mask, labels, scores, bboxes)
return image_with_mask
def process_image_with_traced_model(image):
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
boxes, labels, masks, scores = traced_model(image)
# todo: make this in one large thing
result_image = combine_masks(original_image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
def fetch_image(url):
response = requests.get(url)
return Image.open(BytesIO(response.content)).convert("RGB")
if __name__ == "__main__":
pil_image = fetch_image(
url="http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg")
# convert to BGR format
image = torch.from_numpy(numpy.array(pil_image)[:, :, [2, 1, 0]])
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
for p in coco_demo.model.parameters():
p.requires_grad_(False)
traced_model = torch.jit.trace(single_image_to_top_predictions, (image,))
@torch.jit.script
def end_to_end_model(image):
boxes, labels, masks, scores = traced_model(image)
result_image = combine_masks(image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
end_to_end_model.save('end_to_end_model.pt')
result_image = process_image_with_traced_model(original_image)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image[:, :, [2, 1, 0]])
pyplot.show()
# second image
image2 = fetch_image(
url='http://farm4.staticflickr.com/3153/2970773875_164f0c0b83_z.jpg')
image2 = image2.resize((640, 480), Image.BILINEAR)
image2 = torch.from_numpy(numpy.array(image2)[:, :, [2, 1, 0]])
result_image2 = process_image_with_traced_model(image2)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image2[:, :, [2, 1, 0]])
pyplot.show()
|
[
"torch.jit.trace",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"torch.full",
"torch.stack",
"os.path.join",
"io.BytesIO",
"maskrcnn_benchmark.config.cfg.merge_from_list",
"torch.min",
"requests.get",
"torch.tensor",
"predictor.COCODemo",
"numpy.array",
"torch.ops.maskrcnn_benchmark.add_annotations",
"os.path.abspath",
"maskrcnn_benchmark.config.cfg.freeze",
"torch.arange",
"torch.constant_pad_nd"
] |
[((673, 717), 'maskrcnn_benchmark.config.cfg.merge_from_list', 'cfg.merge_from_list', (["['MODEL.DEVICE', 'cpu']"], {}), "(['MODEL.DEVICE', 'cpu'])\n", (692, 717), False, 'from maskrcnn_benchmark.config import cfg\n'), ((722, 734), 'maskrcnn_benchmark.config.cfg.freeze', 'cfg.freeze', ([], {}), '()\n', (732, 734), False, 'from maskrcnn_benchmark.config import cfg\n'), ((834, 940), 'predictor.COCODemo', 'COCODemo', (['cfg'], {'confidence_threshold': '(0.7)', 'show_mask_heatmaps': '(False)', 'masks_per_dim': '(2)', 'min_image_size': '(480)'}), '(cfg, confidence_threshold=0.7, show_mask_heatmaps=False,\n masks_per_dim=2, min_image_size=480)\n', (842, 940), False, 'from predictor import COCODemo\n'), ((2346, 2411), 'torch.constant_pad_nd', 'torch.constant_pad_nd', (['mask', '(padding, padding, padding, padding)'], {}), '(mask, (padding, padding, padding, padding))\n', (2367, 2411), False, 'import torch\n'), ((2689, 2774), 'torch.stack', 'torch.stack', (['[center_x - w_2, center_y - h_2, center_x + w_2, center_y + h_2]', '(0)'], {}), '([center_x - w_2, center_y - h_2, center_x + w_2, center_y + h_2], 0\n )\n', (2700, 2774), False, 'import torch\n'), ((3223, 3257), 'torch.min', 'torch.min', (['(w - leftcrop)', '(width - x)'], {}), '(w - leftcrop, width - x)\n', (3232, 3257), False, 'import torch\n'), ((3266, 3300), 'torch.min', 'torch.min', (['(h - topcrop)', '(height - y)'], {}), '(h - topcrop, height - y)\n', (3275, 3300), False, 'import torch\n'), ((4585, 4632), 'torch.tensor', 'torch.tensor', (['[255, 255, 255]'], {'dtype': 'torch.long'}), '([255, 255, 255], dtype=torch.long)\n', (4597, 4632), False, 'import torch\n'), ((4722, 4821), 'torch.ops.maskrcnn_benchmark.add_annotations', 'torch.ops.maskrcnn_benchmark.add_annotations', (['image', 'labels', 'scores', 'bboxes', 'class_names', 'color'], {}), '(image, labels, scores, bboxes,\n class_names, color)\n', (4766, 4821), False, 'import torch\n'), ((4983, 5023), 'torch.tensor', 'torch.tensor', (['[33554431, 32767, 2097151]'], {}), '([33554431, 32767, 2097151])\n', (4995, 5023), False, 'import torch\n'), ((6190, 6207), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6202, 6207), False, 'import requests\n'), ((6845, 6903), 'torch.jit.trace', 'torch.jit.trace', (['single_image_to_top_predictions', '(image,)'], {}), '(single_image_to_top_predictions, (image,))\n', (6860, 6903), False, 'import torch\n'), ((7306, 7350), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['result_image[:, :, [2, 1, 0]]'], {}), '(result_image[:, :, [2, 1, 0]])\n', (7319, 7350), False, 'from matplotlib import pyplot\n'), ((7355, 7368), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (7366, 7368), False, 'from matplotlib import pyplot\n'), ((7720, 7765), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['result_image2[:, :, [2, 1, 0]]'], {}), '(result_image2[:, :, [2, 1, 0]])\n', (7733, 7765), False, 'from matplotlib import pyplot\n'), ((7770, 7783), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (7781, 7783), False, 'from matplotlib import pyplot\n'), ((565, 650), 'os.path.join', 'os.path.join', (['project_dir', '"""configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"""'], {}), "(project_dir,\n 'configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml')\n", (577, 650), False, 'import os\n'), ((504, 529), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (519, 529), False, 'import os\n'), ((1647, 1681), 'torch.tensor', 'torch.tensor', (['cfg.INPUT.PIXEL_MEAN'], {}), '(cfg.INPUT.PIXEL_MEAN)\n', (1659, 1681), False, 'import 
torch\n'), ((6465, 6487), 'numpy.array', 'numpy.array', (['pil_image'], {}), '(pil_image)\n', (6476, 6487), False, 'import numpy\n'), ((7578, 7597), 'numpy.array', 'numpy.array', (['image2'], {}), '(image2)\n', (7589, 7597), False, 'import numpy\n'), ((4034, 4071), 'torch.arange', 'torch.arange', (['width'], {'dtype': 'torch.long'}), '(width, dtype=torch.long)\n', (4046, 4071), False, 'import torch\n'), ((4097, 4135), 'torch.arange', 'torch.arange', (['height'], {'dtype': 'torch.long'}), '(height, dtype=torch.long)\n', (4109, 4135), False, 'import torch\n'), ((6230, 6255), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (6237, 6255), False, 'from io import BytesIO\n'), ((3935, 3970), 'torch.full', 'torch.full', (['(1, 1, 3, 3)', '(1.0 / 9.0)'], {}), '((1, 1, 3, 3), 1.0 / 9.0)\n', (3945, 3970), False, 'import torch\n')]
|
from django.conf.urls import patterns, url
from temperature import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^save_temp_reading$', views.save_temp_reading, name='save_temp_reading'),
)
|
[
"django.conf.urls.url"
] |
[((113, 149), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (116, 149), False, 'from django.conf.urls import patterns, url\n'), ((161, 238), 'django.conf.urls.url', 'url', (['"""^save_temp_reading$"""', 'views.save_temp_reading'], {'name': '"""save_temp_reading"""'}), "('^save_temp_reading$', views.save_temp_reading, name='save_temp_reading')\n", (164, 238), False, 'from django.conf.urls import patterns, url\n')]
|
"""
File: commands/calc.py
Purpose: Performs calculations in response to user input, and outputs the result
"""
from sys import argv
import click
from calculator import *
from models import History
from models.Config import Config
from help_menus import calc_help
@click.group("calc", invoke_without_command=True)
@click.option("-M", "--mass-spec",
is_flag=True, default=False,
help="Get a theoretical mass spectrum of a molecule")
@click.option("-i", "--histogram",
is_flag=True, default=False,
help="Use with -M/--mass-spec to display the mass spec as a histogram")
@click.argument("formula", required=False)
def calc(mass_spec, histogram, formula):
config = Config.setup() # todo: Pass as context
if not any(locals().items()) or len(argv) == 2:
calc_help()
else:
if mass_spec:
click.echo(get_mass_spec(formula, histogram))
else:
mass = History.get(formula)["mass"] or get_mass(formula)
click.echo("%.3f %s" % (mass, config.units))
|
[
"click.argument",
"models.Config.Config.setup",
"click.group",
"click.option",
"help_menus.calc_help",
"click.echo",
"models.History.get"
] |
[((270, 318), 'click.group', 'click.group', (['"""calc"""'], {'invoke_without_command': '(True)'}), "('calc', invoke_without_command=True)\n", (281, 318), False, 'import click\n'), ((320, 441), 'click.option', 'click.option', (['"""-M"""', '"""--mass-spec"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Get a theoretical mass spectrum of a molecule"""'}), "('-M', '--mass-spec', is_flag=True, default=False, help=\n 'Get a theoretical mass spectrum of a molecule')\n", (332, 441), False, 'import click\n'), ((466, 605), 'click.option', 'click.option', (['"""-i"""', '"""--histogram"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Use with -M/--mass-spec to display the mass spec as a histogram"""'}), "('-i', '--histogram', is_flag=True, default=False, help=\n 'Use with -M/--mass-spec to display the mass spec as a histogram')\n", (478, 605), False, 'import click\n'), ((630, 671), 'click.argument', 'click.argument', (['"""formula"""'], {'required': '(False)'}), "('formula', required=False)\n", (644, 671), False, 'import click\n'), ((726, 740), 'models.Config.Config.setup', 'Config.setup', ([], {}), '()\n', (738, 740), False, 'from models.Config import Config\n'), ((826, 837), 'help_menus.calc_help', 'calc_help', ([], {}), '()\n', (835, 837), False, 'from help_menus import calc_help\n'), ((1023, 1067), 'click.echo', 'click.echo', (["('%.3f %s' % (mass, config.units))"], {}), "('%.3f %s' % (mass, config.units))\n", (1033, 1067), False, 'import click\n'), ((961, 981), 'models.History.get', 'History.get', (['formula'], {}), '(formula)\n', (972, 981), False, 'from models import History\n')]
|
"""LS-Dyna license server interface."""
import typing
from lm_agent.config import settings
from lm_agent.exceptions import LicenseManagerBadServerOutput
from lm_agent.parsing import lsdyna
from lm_agent.server_interfaces.license_server_interface import LicenseReportItem, LicenseServerInterface
from lm_agent.server_interfaces.utils import run_command
class LSDynaLicenseServer(LicenseServerInterface):
"""Extract license information from LS-Dyna license server."""
def __init__(self, license_servers: typing.List[str]):
"""Initialize the license server instance with the license server host and parser."""
self.license_servers = license_servers
self.parser = lsdyna.parse
def get_commands_list(self):
"""Generate a list of commands with the available license server hosts."""
host_ports = [(server.split(":")[1:]) for server in self.license_servers]
commands_to_run = []
for host, port in host_ports:
command_line = f"{settings.LSDYNA_PATH} -s {port}@{host} -R"
commands_to_run.append(command_line)
return commands_to_run
async def get_output_from_server(self):
"""Override abstract method to get output from Ls-Dyna license server."""
# get the list of commands for each license server host
commands_to_run = self.get_commands_list()
        # run each command in the list, one at a time, until one succeeds
for cmd in commands_to_run:
output = await run_command(cmd)
# try the next server if the previous didn't return the expected data
if output is None:
continue
return output
raise RuntimeError("None of the checks for LS-Dyna succeeded!")
async def get_report_item(self, product_feature: str):
"""Override abstract method to parse LS-Dyna license server output into License Report Item."""
server_output = await self.get_output_from_server()
parsed_output = self.parser(server_output)
(_, feature) = product_feature.split(".")
current_feature_item = parsed_output.get(feature)
# raise exception if parser didn't output license information
if current_feature_item is None:
raise LicenseManagerBadServerOutput("Invalid data returned from parser.")
report_item = LicenseReportItem(
product_feature=product_feature,
used=current_feature_item["used"],
total=current_feature_item["total"],
used_licenses=current_feature_item["uses"],
)
return report_item
|
[
"lm_agent.exceptions.LicenseManagerBadServerOutput",
"lm_agent.server_interfaces.utils.run_command",
"lm_agent.server_interfaces.license_server_interface.LicenseReportItem"
] |
[((2371, 2546), 'lm_agent.server_interfaces.license_server_interface.LicenseReportItem', 'LicenseReportItem', ([], {'product_feature': 'product_feature', 'used': "current_feature_item['used']", 'total': "current_feature_item['total']", 'used_licenses': "current_feature_item['uses']"}), "(product_feature=product_feature, used=\n current_feature_item['used'], total=current_feature_item['total'],\n used_licenses=current_feature_item['uses'])\n", (2388, 2546), False, 'from lm_agent.server_interfaces.license_server_interface import LicenseReportItem, LicenseServerInterface\n'), ((2280, 2347), 'lm_agent.exceptions.LicenseManagerBadServerOutput', 'LicenseManagerBadServerOutput', (['"""Invalid data returned from parser."""'], {}), "('Invalid data returned from parser.')\n", (2309, 2347), False, 'from lm_agent.exceptions import LicenseManagerBadServerOutput\n'), ((1509, 1525), 'lm_agent.server_interfaces.utils.run_command', 'run_command', (['cmd'], {}), '(cmd)\n', (1520, 1525), False, 'from lm_agent.server_interfaces.utils import run_command\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import os
import pickle
import shutil
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
TB_DIR = os.path.join(os.getcwd(), "gan-tb")
SPRITE_IMAGE_FILENAME = os.path.join(TB_DIR, "sprite.png")
def save_tb_embeddings(embeddings_filename):
f = open(embeddings_filename, 'rb')
embeddings = pickle.load(f)
images = embeddings['images']
zs = embeddings['zs']
# overwrite Tensorboard log dir if necessary
if os.path.exists(TB_DIR):
shutil.rmtree(TB_DIR)
os.makedirs(TB_DIR)
# create grid image
img_width, img_height = save_sprite_image(images)
with tf.device('cpu:0'):
# create embedding var
embedding_var = tf.Variable(initial_value=zs)
# save projector config
summary_writer = tf.summary.FileWriter(TB_DIR)
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.sprite.image_path = SPRITE_IMAGE_FILENAME
embedding.sprite.single_image_dim.extend([img_width, img_height])
projector.visualize_embeddings(summary_writer, config)
# save embeddings
sess = tf.Session()
sess.run(embedding_var.initializer)
saver = tf.train.Saver([embedding_var])
saver.save(sess, os.path.join(TB_DIR, 'model.ckpt'))
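    # After this runs, the embeddings can be explored by pointing TensorBoard at
    # the log directory written above (e.g. `tensorboard --logdir gan-tb`) and
    # opening the Projector tab.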
def save_sprite_image(images):
n_embeddings = images.shape[0]
grid_cols = int(np.sqrt(n_embeddings))
grid_rows = int(np.ceil(float(n_embeddings) / grid_cols))
img_height, img_width, img_channels = images[0].shape
grid_image = np.empty((img_height * grid_rows, img_width * grid_cols, img_channels))
for i, image in enumerate(images):
        row = i // grid_cols  # integer division: the row index must be an int
col = i % grid_cols
x = img_width * col
y = img_height * row
grid_image[y:y + img_height, x:x + img_width] = image
grid_image = PIL.Image.fromarray(grid_image.astype('uint8'))
grid_image.save(SPRITE_IMAGE_FILENAME)
return img_width, img_height
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inference tool - DIGITS')
# Positional arguments
parser.add_argument(
'embeddings_file',
help='Embeddings pickle file')
args = vars(parser.parse_args())
try:
save_tb_embeddings(
args['embeddings_file'],
)
except Exception as e:
        print(('%s: %s' % (type(e).__name__, e)))  # Python 3 exceptions have no .message
raise
|
[
"os.path.exists",
"tensorflow.device",
"tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig",
"numpy.sqrt",
"os.makedirs",
"argparse.ArgumentParser",
"tensorflow.Variable",
"tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings",
"tensorflow.Session",
"pickle.load",
"os.path.join",
"tensorflow.train.Saver",
"os.getcwd",
"numpy.empty",
"shutil.rmtree",
"tensorflow.summary.FileWriter"
] |
[((334, 368), 'os.path.join', 'os.path.join', (['TB_DIR', '"""sprite.png"""'], {}), "(TB_DIR, 'sprite.png')\n", (346, 368), False, 'import os\n'), ((287, 298), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (296, 298), False, 'import os\n'), ((473, 487), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (484, 487), False, 'import pickle\n'), ((606, 628), 'os.path.exists', 'os.path.exists', (['TB_DIR'], {}), '(TB_DIR)\n', (620, 628), False, 'import os\n'), ((664, 683), 'os.makedirs', 'os.makedirs', (['TB_DIR'], {}), '(TB_DIR)\n', (675, 683), False, 'import os\n'), ((1759, 1830), 'numpy.empty', 'np.empty', (['(img_height * grid_rows, img_width * grid_cols, img_channels)'], {}), '((img_height * grid_rows, img_width * grid_cols, img_channels))\n', (1767, 1830), True, 'import numpy as np\n'), ((2229, 2291), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Inference tool - DIGITS"""'}), "(description='Inference tool - DIGITS')\n", (2252, 2291), False, 'import argparse\n'), ((638, 659), 'shutil.rmtree', 'shutil.rmtree', (['TB_DIR'], {}), '(TB_DIR)\n', (651, 659), False, 'import shutil\n'), ((773, 791), 'tensorflow.device', 'tf.device', (['"""cpu:0"""'], {}), "('cpu:0')\n", (782, 791), True, 'import tensorflow as tf\n'), ((848, 877), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'zs'}), '(initial_value=zs)\n', (859, 877), True, 'import tensorflow as tf\n'), ((936, 965), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['TB_DIR'], {}), '(TB_DIR)\n', (957, 965), True, 'import tensorflow as tf\n'), ((983, 1010), 'tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig', 'projector.ProjectorConfig', ([], {}), '()\n', (1008, 1010), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((1248, 1302), 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['summary_writer', 'config'], {}), '(summary_writer, config)\n', (1278, 1302), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((1345, 1357), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1355, 1357), True, 'import tensorflow as tf\n'), ((1418, 1449), 'tensorflow.train.Saver', 'tf.train.Saver', (['[embedding_var]'], {}), '([embedding_var])\n', (1432, 1449), True, 'import tensorflow as tf\n'), ((1599, 1620), 'numpy.sqrt', 'np.sqrt', (['n_embeddings'], {}), '(n_embeddings)\n', (1606, 1620), True, 'import numpy as np\n'), ((1475, 1509), 'os.path.join', 'os.path.join', (['TB_DIR', '"""model.ckpt"""'], {}), "(TB_DIR, 'model.ckpt')\n", (1487, 1509), False, 'import os\n')]
|
from datetime import date
now = date.today()
print('The date today is', now, now.strftime("%A"))
|
[
"datetime.date.today"
] |
[((32, 44), 'datetime.date.today', 'date.today', ([], {}), '()\n', (42, 44), False, 'from datetime import date\n')]
|
import tvm
import sys
import time
import numpy as np
from tvm.tensor_graph.testing.models import resnet
from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, \
GraphTensor, GraphOp, PyTIRGraph
from tvm.tensor_graph.nn import CELoss, SGD
from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, \
GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, \
SingleCut, form_cut_candidates, LayoutTransform
from tvm.tensor_graph.core.utils import flatten_tir_graph
from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace
from tvm.tensor_graph.core.tuner import RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner
from tvm.tensor_graph.core.scheduler import PrimitiveScheduler as Scheduler
from tvm.tensor_graph.core.scheduler import schedule_all
from tvm.tensor_graph.core.build_graph import build_all
from tvm.tensor_graph.core.runtime import run_all
def test1():
print("test 1 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
begin = time.time()
# change data layout
forward_space = ForwardGraphSpace()
forward_tuner = RandomForwardTuner(forward_space)
layout_generator = LayoutTransform(fwd_graph, forward_space, forward_tuner)
fgraph = layout_generator.generate()
after_layout = time.time()
# autodiff
bgraph = fgraph.make_backward(ce_loss, sgd)
after_autodiff = time.time()
# make tir graph
inputs = [x.tvm_tensor for x in bgraph.inputs]
weights = [x.tvm_tensor for x in bgraph.weights]
outputs = [x.tvm_tensor for x in bgraph.outputs]
# labels = [x.tvm_tensor for x in bgraph.labels]
# loss = bgraph.loss.tvm_tensor
# gradients = [x.tvm_tensor for x in bgraph.gradients]
# updates = [x.tvm_tensor for x in bgraph.updates]
labels = []
loss = None
gradients = []
lr = None
updates = []
tgraph = PyTIRGraph(
inputs,
labels,
outputs,
weights,
loss,
gradients,
lr,
updates)
after_tir_graph = time.time()
# subgraph partition
partition_space = PartitionSpace()
partition_tuner = RandomPartitionTuner(partition_space)
cut_candidates = form_cut_candidates(tgraph)
# print(cut_candidates)
for i, candidate in enumerate(cut_candidates):
name = "graph_cut_" + str(i)
partition_generator = SingleCut(tgraph, name, candidate, partition_space, partition_tuner)
partition_generator.generate()
# for op, stat in tgraph.op_stat_dict.items():
# print(op, " head=", stat.head)
tgraph.partition_graph()
after_partition = time.time()
print("num subgraphs:", len(tgraph.subgraphs))
target = "cuda"
dev = 0
# update the op stat dict of subgraphs
# do auto-schedule
total_build_trials = 0
build_time_record = []
for mark, subgraph in tgraph.subgraphs.items():
# print("subgraph", mark)
tensors = list(subgraph.outputs.keys()) + list(subgraph.loss.keys()) \
+ list(subgraph.gradients.keys()) + list(subgraph.updates.keys())
ops = [x.op for x in tensors]
op_list, down_graph = flatten_tir_graph(ops, output_first=True)
op_stat_dict = {}
for op in op_list:
v = tgraph.op_map[op]
if v in tgraph.op_stat_dict:
op_stat_dict[op] = tgraph.op_stat_dict[v]
c_list = form_connected_sets(subgraph, op_stat_dict, tensors, ops, down_graph)
# print("c_list_length=", len(c_list))
# print("check connected set")
# for connected_set in c_list:
# print(connected_set)
scheduler = Scheduler()
# sch = tgraph.schedules[mark]
for i, connected_set in enumerate(c_list):
name = "subgraph_" + str(mark) + "_connect_" + str(i)
assert not connected_set.empty()
build_success = False
for trial in range(10):
total_build_trials += 1
tgraph.create_schedule_for(mark=mark)
sch = tgraph.schedules[mark]
if connected_set.has_master():
if connected_set.iso_base():
PrimitiveScheduler = GPUScheduleMasterBaseSet
else:
PrimitiveScheduler = GPUScheduleMasterSet
primitive_generator = PrimitiveScheduler(
name, subgraph, connected_set, down_graph, op_stat_dict, scheduler)
else:
PrimitiveScheduler = GPUScheduleBaseSet
primitive_generator = PrimitiveScheduler(
name, connected_set, scheduler)
primitive_generator.generate(sch)
# try:
# print(tvm.lower(sch, tgraph.bufs[mark], simple_mode=True))
# except Exception as e:
# print(e)
# print("prologue")
# for p in connected_set.prologue:
# print(p.body)
# print("epilogue")
# for e in connected_set.epilogue:
# print(e.body)
# print("base")
# print(connected_set.base.body)
# print("master")
# print(connected_set.master.body)
# print(connected_set.master.input_tensors)
# for op, master in connected_set.prologue.items():
# in_input = False
# for inp in master.input_tensors:
# if op == inp.op:
# in_input = True
# break
# if not in_input:
# print(op, "not in the inputs of", master)
build_beg = time.time()
build_success = tgraph.build_for(target, mark=mark)
build_end = time.time()
build_time_record.append(build_end - build_beg)
if build_success:
break
if not build_success:
raise RuntimeError("Can't build for subgraph", mark)
after_schedule = time.time()
tgraph.set_inputs({bgraph.inputs[0].tvm_tensor: img_np})
# tgraph.set_labels({bgraph.labels[0].tvm_tensor: label_np})
# tgraph.set_lr(optimize_engine.get_lr())
tgraph.allocate_buffer(target, dev)
beg = time.time()
for mark in tgraph.call_order:
func = tgraph.functions[mark]
bufs = tgraph.bufs[mark]
real_bufs = [tgraph.tvm_array_dict[tgraph.subgraphs[mark].index[x]] for x in bufs]
func_beg = time.time()
func(*real_bufs)
func_end = time.time()
print((func_end - func_beg) * 1e3, "ms")
end = time.time()
print("End to end time:", (end - beg) * 1e3, "ms")
print("total build trails=", total_build_trials)
print("layout change time cost=", (after_layout - begin) * 1e3, "ms")
print("autodiff time cost=", (after_autodiff - after_layout) * 1e3, "ms")
print("make tir_graph time cost=", (after_tir_graph - after_autodiff) * 1e3, "ms")
print("subgraph partition time cost=", (after_partition - after_tir_graph) * 1e3, "ms")
print("schedule time cost=", (after_schedule - after_partition) * 1e3, "ms. average=",
(after_schedule - after_partition) * 1e3 / total_build_trials, "ms")
print("average build time cost=", np.array(build_time_record).mean() * 1e3, "ms")
print("total build time cost=", (after_schedule - begin) * 1e3, "ms")
print("Success!")
def test2(file=sys.stdout):
print("test 2 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
tir_graph = schedule_all(fwd_graph, loss=ce_loss, optimizer=sgd, inference=False)
print(len(tir_graph.subgraphs))
print("different subgraphs:", len(set(tir_graph.subgraph_features.values())), file=file)
print("direrent ops:", len(set(tir_graph.op_feature_dict.values())), file=file)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
for k, v in tir_graph.op_map.items():
print(k.name, v.name, file=file)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
tmp = {}
for f in set(tir_graph.op_feature_dict.values()):
if f.split(")")[-1] not in tmp:
tmp[f.split(")")[-1]] = []
tmp[f.split(")")[-1]].append(f)
print("different kinds of ops:", len(tmp), file=file)
for k, v in tmp.items():
print(k, file=file)
for vv in v:
print(" ", vv, file=file)
print("####################################################", file=file)
tmp = {}
for f in set(tir_graph.subgraph_features.values()):
key = ";".join([x.split(")")[-1] for x in f.split(";")])
if key not in tmp:
tmp[key] = []
tmp[key].append(f)
print("different kinds of subgraphs:", len(tmp), file=file)
for k, v in tmp.items():
print(k, file=file)
for vv in v:
print(" ", vv, file=file)
for k, v in tir_graph.subgraph_features.items():
key = ";".join([x.split(")")[-1] for x in v.split(";")])
if key == "collect_3_dim4;grad_bn2d_to_conv2d_nchw_8;grad_bn2d_var_to_conv2d_nchw_10;grad_bn2d_mean_to_conv2d_nchw_2;collect_2_dim1":
i = 1
for op in tir_graph.subgraphs[k].op_list:
print(i, ". #####")
i += 1
print(op.body)
print(op.input_tensors)
break
# target = "cuda"
# dev = 0
# print("begin schedule")
# beg_build = time.time()
# build_all(fwd_graph, tir_graph, target=target, build_trial=10)
# end_build = time.time()
# print("num functions:", len(tir_graph.shared_functions))
# print("build time cost=", (end_build - beg_build) * 1e3, "ms")
# try:
# run_all(tir_graph, [img_np], [label_np], sgd.get_lr(), target=target, dev=dev)
# except Exception as e:
# print("run error:", e)
print("Success", file=file)
def test3():
print("test 3 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
tir_graph = schedule_all(fwd_graph)
print(len(tir_graph.subgraphs))
print("different subgraphs:", len(set(tir_graph.subgraph_features.values())))
print("direrent ops:", len(set(tir_graph.op_feature_dict.values())))
tmp = {}
# for f in set(tir_graph.op_feature_dict.values()):
# if f.split(")")[-1] not in tmp:
# tmp[f.split(")")[-1]] = []
# tmp[f.split(")")[-1]].append(f)
# for k, v in tmp.items():
# print(k)
# for vv in v:
# print(" ", vv)
print("####################################################")
tmp = {}
# for f in set(tir_graph.subgraph_features.values()):
# key = ";".join([x.split(")")[-1] for x in f.split(";")])
# if key not in tmp:
# tmp[key] = []
# tmp[key].append(f)
print("different kinds of subgraphs:", len(tmp))
for k, v in tmp.items():
print(k)
for vv in v:
print(" ", vv)
# target = "cuda"
# dev = 1
# print("begin build")
# beg_build = time.time()
# build_all(fwd_graph, tir_graph, target=target, build_trial=10)
# end_build = time.time()
# print("num functions:", len(tir_graph.shared_functions))
# print("build time cost=", (end_build - beg_build) * 1e3, "ms")
# try:
# run_all(tir_graph, [img_np], target=target, dev=dev)
# except Exception as e:
# print("run error:", e)
print("Success")
if __name__ == "__main__":
with open("trace_resnet_subgraph.log", "w") as fout:
test2(file=fout)
# test3()
|
[
"tvm.tensor_graph.core.tuner.RandomForwardTuner",
"tvm.tensor_graph.core.schedule_generator.form_cut_candidates",
"tvm.tensor_graph.core.utils.flatten_tir_graph",
"numpy.array",
"tvm.tensor_graph.core.GraphTensor",
"tvm.tensor_graph.core.ForwardGraph",
"tvm.tensor_graph.nn.CELoss",
"tvm.tensor_graph.core.scheduler.PrimitiveScheduler",
"tvm.tensor_graph.core.scheduler.schedule_all",
"tvm.tensor_graph.core.space.ForwardGraphSpace",
"tvm.tensor_graph.nn.SGD",
"tvm.tensor_graph.core.space.PartitionSpace",
"tvm.tensor_graph.core.schedule_generator.form_connected_sets",
"tvm.tensor_graph.core.schedule_generator.LayoutTransform",
"tvm.tensor_graph.testing.models.resnet.resnet50",
"tvm.tensor_graph.core.PyTIRGraph",
"time.time",
"tvm.tensor_graph.core.schedule_generator.SingleCut",
"tvm.tensor_graph.core.tuner.RandomPartitionTuner",
"numpy.random.uniform"
] |
[((1232, 1265), 'tvm.tensor_graph.testing.models.resnet.resnet50', 'resnet.resnet50', ([], {'num_classes': '(1000)'}), '(num_classes=1000)\n', (1247, 1265), False, 'from tvm.tensor_graph.testing.models import resnet\n'), ((1281, 1330), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['img_shape'], {'dtype': 'dtype', 'name': '"""image"""'}), "(img_shape, dtype=dtype, name='image')\n", (1292, 1330), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((1348, 1399), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['label_shape'], {'dtype': 'dtype', 'name': '"""label"""'}), "(label_shape, dtype=dtype, name='label')\n", (1359, 1399), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((1735, 1755), 'tvm.tensor_graph.nn.CELoss', 'CELoss', (['label_tensor'], {}), '(label_tensor)\n', (1741, 1755), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((1764, 1774), 'tvm.tensor_graph.nn.SGD', 'SGD', (['(0.002)'], {}), '(0.002)\n', (1767, 1774), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((1789, 1849), 'tvm.tensor_graph.core.ForwardGraph', 'ForwardGraph', (['[img_tensor]', '[output_tensor]', 'weights_tensors'], {}), '([img_tensor], [output_tensor], weights_tensors)\n', (1801, 1849), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((1861, 1872), 'time.time', 'time.time', ([], {}), '()\n', (1870, 1872), False, 'import time\n'), ((1914, 1933), 'tvm.tensor_graph.core.space.ForwardGraphSpace', 'ForwardGraphSpace', ([], {}), '()\n', (1931, 1933), False, 'from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace\n'), ((1952, 1985), 'tvm.tensor_graph.core.tuner.RandomForwardTuner', 'RandomForwardTuner', (['forward_space'], {}), '(forward_space)\n', (1970, 1985), False, 'from tvm.tensor_graph.core.tuner import RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner\n'), ((2008, 2064), 'tvm.tensor_graph.core.schedule_generator.LayoutTransform', 'LayoutTransform', (['fwd_graph', 'forward_space', 'forward_tuner'], {}), '(fwd_graph, forward_space, forward_tuner)\n', (2023, 2064), False, 'from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, SingleCut, form_cut_candidates, LayoutTransform\n'), ((2121, 2132), 'time.time', 'time.time', ([], {}), '()\n', (2130, 2132), False, 'import time\n'), ((2212, 2223), 'time.time', 'time.time', ([], {}), '()\n', (2221, 2223), False, 'import time\n'), ((2674, 2748), 'tvm.tensor_graph.core.PyTIRGraph', 'PyTIRGraph', (['inputs', 'labels', 'outputs', 'weights', 'loss', 'gradients', 'lr', 'updates'], {}), '(inputs, labels, outputs, weights, loss, gradients, lr, updates)\n', (2684, 2748), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((2803, 2814), 'time.time', 'time.time', ([], {}), '()\n', (2812, 2814), False, 'import time\n'), ((2859, 2875), 'tvm.tensor_graph.core.space.PartitionSpace', 'PartitionSpace', ([], {}), '()\n', (2873, 2875), False, 'from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace\n'), ((2896, 2933), 'tvm.tensor_graph.core.tuner.RandomPartitionTuner', 'RandomPartitionTuner', (['partition_space'], {}), '(partition_space)\n', (2916, 2933), False, 'from tvm.tensor_graph.core.tuner import 
RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner\n'), ((2954, 2981), 'tvm.tensor_graph.core.schedule_generator.form_cut_candidates', 'form_cut_candidates', (['tgraph'], {}), '(tgraph)\n', (2973, 2981), False, 'from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, SingleCut, form_cut_candidates, LayoutTransform\n'), ((3357, 3368), 'time.time', 'time.time', ([], {}), '()\n', (3366, 3368), False, 'import time\n'), ((6405, 6416), 'time.time', 'time.time', ([], {}), '()\n', (6414, 6416), False, 'import time\n'), ((6631, 6642), 'time.time', 'time.time', ([], {}), '()\n', (6640, 6642), False, 'import time\n'), ((6954, 6965), 'time.time', 'time.time', ([], {}), '()\n', (6963, 6965), False, 'import time\n'), ((7951, 7984), 'tvm.tensor_graph.testing.models.resnet.resnet50', 'resnet.resnet50', ([], {'num_classes': '(1000)'}), '(num_classes=1000)\n', (7966, 7984), False, 'from tvm.tensor_graph.testing.models import resnet\n'), ((8000, 8049), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['img_shape'], {'dtype': 'dtype', 'name': '"""image"""'}), "(img_shape, dtype=dtype, name='image')\n", (8011, 8049), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((8067, 8118), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['label_shape'], {'dtype': 'dtype', 'name': '"""label"""'}), "(label_shape, dtype=dtype, name='label')\n", (8078, 8118), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((8454, 8474), 'tvm.tensor_graph.nn.CELoss', 'CELoss', (['label_tensor'], {}), '(label_tensor)\n', (8460, 8474), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((8483, 8493), 'tvm.tensor_graph.nn.SGD', 'SGD', (['(0.002)'], {}), '(0.002)\n', (8486, 8493), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((8508, 8568), 'tvm.tensor_graph.core.ForwardGraph', 'ForwardGraph', (['[img_tensor]', '[output_tensor]', 'weights_tensors'], {}), '([img_tensor], [output_tensor], weights_tensors)\n', (8520, 8568), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((8584, 8653), 'tvm.tensor_graph.core.scheduler.schedule_all', 'schedule_all', (['fwd_graph'], {'loss': 'ce_loss', 'optimizer': 'sgd', 'inference': '(False)'}), '(fwd_graph, loss=ce_loss, optimizer=sgd, inference=False)\n', (8596, 8653), False, 'from tvm.tensor_graph.core.scheduler import schedule_all\n'), ((10961, 10994), 'tvm.tensor_graph.testing.models.resnet.resnet50', 'resnet.resnet50', ([], {'num_classes': '(1000)'}), '(num_classes=1000)\n', (10976, 10994), False, 'from tvm.tensor_graph.testing.models import resnet\n'), ((11010, 11059), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['img_shape'], {'dtype': 'dtype', 'name': '"""image"""'}), "(img_shape, dtype=dtype, name='image')\n", (11021, 11059), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((11077, 11128), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['label_shape'], {'dtype': 'dtype', 'name': '"""label"""'}), "(label_shape, dtype=dtype, name='label')\n", (11088, 11128), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((11464, 11484), 'tvm.tensor_graph.nn.CELoss', 'CELoss', (['label_tensor'], {}), '(label_tensor)\n', (11470, 
11484), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((11493, 11503), 'tvm.tensor_graph.nn.SGD', 'SGD', (['(0.002)'], {}), '(0.002)\n', (11496, 11503), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((11518, 11578), 'tvm.tensor_graph.core.ForwardGraph', 'ForwardGraph', (['[img_tensor]', '[output_tensor]', 'weights_tensors'], {}), '([img_tensor], [output_tensor], weights_tensors)\n', (11530, 11578), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((11594, 11617), 'tvm.tensor_graph.core.scheduler.schedule_all', 'schedule_all', (['fwd_graph'], {}), '(fwd_graph)\n', (11606, 11617), False, 'from tvm.tensor_graph.core.scheduler import schedule_all\n'), ((3118, 3186), 'tvm.tensor_graph.core.schedule_generator.SingleCut', 'SingleCut', (['tgraph', 'name', 'candidate', 'partition_space', 'partition_tuner'], {}), '(tgraph, name, candidate, partition_space, partition_tuner)\n', (3127, 3186), False, 'from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, SingleCut, form_cut_candidates, LayoutTransform\n'), ((3848, 3889), 'tvm.tensor_graph.core.utils.flatten_tir_graph', 'flatten_tir_graph', (['ops'], {'output_first': '(True)'}), '(ops, output_first=True)\n', (3865, 3889), False, 'from tvm.tensor_graph.core.utils import flatten_tir_graph\n'), ((4062, 4131), 'tvm.tensor_graph.core.schedule_generator.form_connected_sets', 'form_connected_sets', (['subgraph', 'op_stat_dict', 'tensors', 'ops', 'down_graph'], {}), '(subgraph, op_stat_dict, tensors, ops, down_graph)\n', (4081, 4131), False, 'from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, SingleCut, form_cut_candidates, LayoutTransform\n'), ((4290, 4301), 'tvm.tensor_graph.core.scheduler.PrimitiveScheduler', 'Scheduler', ([], {}), '()\n', (4299, 4301), True, 'from tvm.tensor_graph.core.scheduler import PrimitiveScheduler as Scheduler\n'), ((6841, 6852), 'time.time', 'time.time', ([], {}), '()\n', (6850, 6852), False, 'import time\n'), ((6889, 6900), 'time.time', 'time.time', ([], {}), '()\n', (6898, 6900), False, 'import time\n'), ((1598, 1633), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'img_shape'], {}), '(-1, 1, img_shape)\n', (1615, 1633), True, 'import numpy as np\n'), ((1661, 1707), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '[batch, num_classes]'], {}), '(-1, 1, [batch, num_classes])\n', (1678, 1707), True, 'import numpy as np\n'), ((8317, 8352), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'img_shape'], {}), '(-1, 1, img_shape)\n', (8334, 8352), True, 'import numpy as np\n'), ((8380, 8426), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '[batch, num_classes]'], {}), '(-1, 1, [batch, num_classes])\n', (8397, 8426), True, 'import numpy as np\n'), ((11327, 11362), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'img_shape'], {}), '(-1, 1, img_shape)\n', (11344, 11362), True, 'import numpy as np\n'), ((11390, 11436), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '[batch, num_classes]'], {}), '(-1, 1, [batch, num_classes])\n', (11407, 11436), True, 'import numpy as np\n'), ((6092, 6103), 'time.time', 'time.time', ([], {}), '()\n', (6101, 6103), False, 'import time\n'), ((6184, 6195), 'time.time', 'time.time', ([], {}), '()\n', (6193, 6195), False, 'import 
time\n'), ((7596, 7623), 'numpy.array', 'np.array', (['build_time_record'], {}), '(build_time_record)\n', (7604, 7623), True, 'import numpy as np\n')]
|
import datetime
import logging
import os
import re
from collections import OrderedDict
from html import escape
from html.parser import HTMLParser
from io import StringIO
import docutils
import docutils.core
import docutils.io
from docutils.parsers.rst.languages import get_language as get_docutils_lang
from docutils.writers.html4css1 import HTMLTranslator, Writer
from pelican import rstdirectives # NOQA
from pelican.cache import FileStampDataCacher
from pelican.contents import Author, Category, Page, Tag
from pelican.plugins import signals
from pelican.utils import get_date, pelican_open, posixize_path
try:
from markdown import Markdown
except ImportError:
Markdown = False # NOQA
# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()
DUPLICATES_DEFINITIONS_ALLOWED = {
'tags': False,
'date': False,
'modified': False,
'status': False,
'category': False,
'author': False,
'save_as': False,
'url': False,
'authors': False,
'slug': False
}
METADATA_PROCESSORS = {
'tags': lambda x, y: ([
Tag(tag, y)
for tag in ensure_metadata_list(x)
] or _DISCARD),
'date': lambda x, y: get_date(x.replace('_', ' ')),
'modified': lambda x, y: get_date(x),
'status': lambda x, y: x.strip() or _DISCARD,
'category': lambda x, y: _process_if_nonempty(Category, x, y),
'author': lambda x, y: _process_if_nonempty(Author, x, y),
'authors': lambda x, y: ([
Author(author, y)
for author in ensure_metadata_list(x)
] or _DISCARD),
'slug': lambda x, y: x.strip() or _DISCARD,
}
logger = logging.getLogger(__name__)
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write
author lists in either "<NAME>, <NAME>" or "<NAME>; <NAME>"
format.
Regardless, all list items undergo .strip() before returning, and
empty items are discarded.
"""
if isinstance(text, str):
if ';' in text:
text = text.split(';')
else:
text = text.split(',')
return list(OrderedDict.fromkeys(
[v for v in (w.strip() for w in text) if v]
))
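# Illustrative examples (not part of the original module):
#   ensure_metadata_list("Alice, Bob")      -> ["Alice", "Bob"]
#   ensure_metadata_list("Alice; Bob, Jr.") -> ["Alice", "Bob, Jr."]
#   ensure_metadata_list([" a ", "", "b"])  -> ["a", "b"]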
def _process_if_nonempty(processor, name, settings):
"""Removes extra whitespace from name and applies a metadata processor.
If name is empty or all whitespace, returns _DISCARD instead.
"""
name = name.strip()
return processor(name, settings) if name else _DISCARD
def _filter_discardable_metadata(metadata):
"""Return a copy of a dict, minus any items marked as discardable."""
return {name: val for name, val in metadata.items() if val is not _DISCARD}
class BaseReader:
"""Base class to read files.
This class is used to process static files, and it can be inherited for
other types of file. A Reader class must have the following attributes:
- enabled: (boolean) tell if the Reader class is enabled. It
generally depends on the import of some dependency.
- file_extensions: a list of file extensions that the Reader will process.
- extensions: a list of extensions to use in the reader (typical use is
Markdown).
"""
enabled = True
file_extensions = ['static']
extensions = None
def __init__(self, settings):
self.settings = settings
def process_metadata(self, name, value):
if name in METADATA_PROCESSORS:
return METADATA_PROCESSORS[name](value, self.settings)
return value
def read(self, source_path):
"No-op parser"
content = None
metadata = {}
return content, metadata
class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document):
super().__init__(document)
self.compact_p = None
def astext(self):
return ''.join(self.body)
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
def render_node_to_html(document, node, field_body_translator_class):
visitor = field_body_translator_class(document)
node.walkabout(visitor)
return visitor.astext()
class PelicanHTMLWriter(Writer):
def __init__(self):
super().__init__()
self.translator_class = PelicanHTMLTranslator
class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
node['alt'] = node.get('alt', '')
return HTMLTranslator.visit_image(self, node)
class RstReader(BaseReader):
"""Reader for reStructuredText files
By default the output HTML is written using
docutils.writers.html4css1.Writer and translated using a subclass of
docutils.writers.html4css1.HTMLTranslator. If you want to override it with
your own writer/translator (e.g. a HTML5-based one), pass your classes to
these two attributes. Look in the source code for details.
writer_class Used for writing contents
field_body_translator_class Used for translating metadata such
as article summary
"""
enabled = bool(docutils)
file_extensions = ['rst']
writer_class = PelicanHTMLWriter
field_body_translator_class = _FieldBodyTranslator
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
lang_code = self.settings.get('DEFAULT_LANG', 'en')
if get_docutils_lang(lang_code):
self._language_code = lang_code
else:
logger.warning("Docutils has no localization for '%s'."
" Using 'en' instead.", lang_code)
self._language_code = 'en'
def _parse_metadata(self, document, source_path):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
output = {}
if document.first_child_matching_class(docutils.nodes.title) is None:
logger.warning(
'Document title missing in file %s: '
'Ensure exactly one top level section',
source_path)
for docinfo in document.traverse(docutils.nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name.lower() in formatted_fields:
value = render_node_to_html(
document, body_elem,
self.field_body_translator_class)
else:
value = body_elem.astext()
elif element.tagname == 'authors': # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
output[name] = self.process_metadata(name, value)
return output
def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2',
'syntax_highlight': 'short',
'input_encoding': 'utf-8',
'language_code': self._language_code,
'halt_level': 2,
'traceback': True,
'warning_stream': StringIO(),
'embed_stylesheet': False}
user_params = self.settings.get('DOCUTILS_SETTINGS')
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
writer=self.writer_class(),
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish()
return pub
def read(self, source_path):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get('body')
metadata = self._parse_metadata(pub.document, source_path)
metadata.setdefault('title', parts.get('title'))
return content, metadata
class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
file_extensions = ['md', 'markdown', 'mkd', 'mdown']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
settings = self.settings['MARKDOWN']
settings.setdefault('extension_configs', {})
settings.setdefault('extensions', [])
for extension in settings['extension_configs'].keys():
if extension not in settings['extensions']:
settings['extensions'].append(extension)
if 'markdown.extensions.meta' not in settings['extensions']:
settings['extensions'].append('markdown.extensions.meta')
self._source_path = None
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
# prevent metadata extraction in fields
self._md.preprocessors.deregister('meta')
output = {}
for name, value in meta.items():
name = name.lower()
if name in formatted_fields:
                # formatted metadata is a special case: join all list values
formatted_values = "\n".join(value)
# reset the markdown instance to clear any state
self._md.reset()
formatted = self._md.convert(formatted_values)
output[name] = self.process_metadata(name, formatted)
elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
if len(value) > 1:
logger.warning(
'Duplicate definition of `%s` '
'for %s. Using first one.',
name, self._source_path)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
output[name] = self.process_metadata(name, value)
else:
# otherwise, handle metadata as single string
output[name] = self.process_metadata(name, value[0])
return output
def read(self, source_path):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
self._md = Markdown(**self.settings['MARKDOWN'])
with pelican_open(source_path) as text:
content = self._md.convert(text)
if hasattr(self._md, 'Meta'):
metadata = self._parse_metadata(self._md.Meta)
else:
metadata = {}
return content, metadata
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ['htm', 'html']
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
super().__init__(convert_charrefs=False)
self.body = ''
self.metadata = {}
self.settings = settings
self._data_buffer = ''
self._filename = filename
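            # The _in_* flags below form a small state machine tracking whether
            # parsing is currently inside <head>, <title>, <body>, etc., so
            # metadata and body content are captured separately.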
self._in_top_level = True
self._in_head = False
self._in_title = False
self._in_body = False
self._in_tags = False
def handle_starttag(self, tag, attrs):
if tag == 'head' and self._in_top_level:
self._in_top_level = False
self._in_head = True
elif tag == 'title' and self._in_head:
self._in_title = True
self._data_buffer = ''
elif tag == 'body' and self._in_top_level:
self._in_top_level = False
self._in_body = True
self._data_buffer = ''
elif tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
if tag == 'head':
if self._in_head:
self._in_head = False
self._in_top_level = True
elif self._in_head and tag == 'title':
self._in_title = False
self.metadata['title'] = self._data_buffer
elif tag == 'body':
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
self._data_buffer += '</{}>'.format(escape(tag))
def handle_startendtag(self, tag, attrs):
if tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
self._data_buffer += '<!--{}-->'.format(data)
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
self._data_buffer += '&{};'.format(data)
def handle_charref(self, data):
self._data_buffer += '&#{};'.format(data)
def build_tag(self, tag, attrs, close_tag):
result = '<{}'.format(escape(tag))
for k, v in attrs:
result += ' ' + escape(k)
if v is not None:
# If the attribute value contains a double quote, surround
# with single quotes, otherwise use double quotes.
if '"' in v:
result += "='{}'".format(escape(v, quote=False))
else:
result += '="{}"'.format(escape(v, quote=False))
if close_tag:
return result + ' />'
return result + '>'
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name')
if name is None:
attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
attr_serialized = ', '.join(attr_list)
logger.warning("Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename, attr_serialized)
return
name = name.lower()
contents = self._attr_value(attrs, 'content', '')
if not contents:
contents = self._attr_value(attrs, 'contents', '')
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
extra={'limit_msg': "Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'"})
if name == 'keywords':
name = 'tags'
if name in self.metadata:
# if this metadata already exists (i.e. a previous tag with the
# same name has already been specified then either convert to
# list or append to list
if isinstance(self.metadata[name], list):
self.metadata[name].append(contents)
else:
self.metadata[name] = [self.metadata[name], contents]
else:
self.metadata[name] = contents
@classmethod
def _attr_value(cls, attrs, name, default=None):
return next((x[1] for x in attrs if x[0] == name), default)
def read(self, filename):
"""Parse content and metadata of HTML files"""
with pelican_open(filename) as content:
parser = self._HTMLParser(self.settings, filename)
parser.feed(content)
parser.close()
metadata = {}
for k in parser.metadata:
metadata[k] = self.process_metadata(k, parser.metadata[k])
return parser.body, metadata
class Readers(FileStampDataCacher):
"""Interface for all readers.
This class contains a mapping of file extensions / Reader classes, to know
which Reader class must be used to read a file (based on its extension).
This is customizable both with the 'READERS' setting, and with the
    'readers_init' signal for plugins.
"""
def __init__(self, settings=None, cache_name=''):
self.settings = settings or {}
self.readers = {}
self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
logger.debug('Missing dependencies for %s',
', '.join(cls.file_extensions))
continue
for ext in cls.file_extensions:
self.reader_classes[ext] = cls
if self.settings['READERS']:
self.reader_classes.update(self.settings['READERS'])
signals.readers_init.send(self)
for fmt, reader_class in self.reader_classes.items():
if not reader_class:
continue
self.readers[fmt] = reader_class(self.settings)
# set up caching
cache_this_level = (cache_name != '' and
self.settings['CONTENT_CACHING_LAYER'] == 'reader')
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
super().__init__(settings, cache_name, caching_policy, load_policy)
@property
def extensions(self):
return self.readers.keys()
def read_file(self, base_path, path, content_class=Page, fmt=None,
context=None, preread_signal=None, preread_sender=None,
context_signal=None, context_sender=None):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path))
logger.debug(
'Read file %s -> %s',
source_path, content_class.__name__)
if not fmt:
_, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:]
if fmt not in self.readers:
raise TypeError(
'Pelican does not know how to parse %s', path)
if preread_signal:
logger.debug(
'Signal %s.send(%s)',
preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
metadata = _filter_discardable_metadata(default_metadata(
settings=self.settings, process=reader.process_metadata))
metadata.update(path_metadata(
full_path=path, source_path=source_path,
settings=self.settings))
metadata.update(_filter_discardable_metadata(parse_path_metadata(
source_path=source_path, settings=self.settings,
process=reader.process_metadata)))
reader_name = reader.__class__.__name__
metadata['reader'] = reader_name.replace('Reader', '').lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
content, reader_metadata = reader.read(path)
reader_metadata = _filter_discardable_metadata(reader_metadata)
self.cache_data(path, (content, reader_metadata))
metadata.update(reader_metadata)
if content:
# find images with empty alt
find_empty_alt(content, path)
# eventually filter the content with typogrify if asked so
if self.settings['TYPOGRIFY']:
from typogrify.filters import typogrify
import smartypants
typogrify_dashes = self.settings['TYPOGRIFY_DASHES']
if typogrify_dashes == 'oldschool':
smartypants.Attr.default = smartypants.Attr.set2
elif typogrify_dashes == 'oldschool_inverted':
smartypants.Attr.default = smartypants.Attr.set3
else:
smartypants.Attr.default = smartypants.Attr.set1
            # Tell `smartypants` to also replace &quot; HTML entities with
# smart quotes. This is necessary because Docutils has already
# replaced double quotes with said entities by the time we run
# this filter.
smartypants.Attr.default |= smartypants.Attr.w
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
return typogrify(
text,
self.settings['TYPOGRIFY_IGNORE_TAGS'])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
if 'title' in metadata:
metadata['title'] = typogrify_wrapper(metadata['title'])
if 'summary' in metadata:
metadata['summary'] = typogrify_wrapper(metadata['summary'])
if context_signal:
logger.debug(
'Signal %s.send(%s, <metadata>)',
context_signal.name,
context_sender)
context_signal.send(context_sender, metadata=metadata)
return content_class(content=content, metadata=metadata,
settings=self.settings, source_path=path,
context=context)
def find_empty_alt(content, path):
"""Find images with empty alt
Create warnings for all images with empty alt (up to a certain number),
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(r"""
(?:
# src before alt
<img
[^\>]*
src=(['"])(.*?)\1
[^\>]*
alt=(['"])\3
)|(?:
# alt before src
<img
[^\>]*
alt=(['"])\4
[^\>]*
src=(['"])(.*?)\5
)
""", re.X)
for match in re.findall(imgs, content):
logger.warning(
'Empty alt attribute for image %s in %s',
os.path.basename(match[1] + match[5]), path,
extra={'limit_msg': 'Other images have empty alt attributes'})
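# Illustrative note (not in the original source): the regular expression above
# matches tags such as <img src="photo.png" alt=""> or <img alt='' src='x.png'>,
# i.e. images whose alt attribute is present but empty, regardless of whether
# src appears before or after alt.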
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
for name, value in dict(settings.get('DEFAULT_METADATA', {})).items():
if process:
value = process(name, value)
metadata[name] = value
if 'DEFAULT_CATEGORY' in settings:
value = settings['DEFAULT_CATEGORY']
if process:
value = process('category', value)
metadata['category'] = value
if settings.get('DEFAULT_DATE', None) and \
settings['DEFAULT_DATE'] != 'fs':
if isinstance(settings['DEFAULT_DATE'], str):
metadata['date'] = get_date(settings['DEFAULT_DATE'])
else:
metadata['date'] = datetime.datetime(*settings['DEFAULT_DATE'])
return metadata
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
if settings.get('DEFAULT_DATE', None) == 'fs':
metadata['date'] = datetime.datetime.fromtimestamp(
os.stat(full_path).st_mtime)
metadata['modified'] = metadata['date']
# Apply EXTRA_PATH_METADATA for the source path and the paths of any
# parent directories. Sorting EPM first ensures that the most specific
# path wins conflicts.
epm = settings.get('EXTRA_PATH_METADATA', {})
for path, meta in sorted(epm.items()):
# Enforce a trailing slash when checking for parent directories.
# This prevents false positives when one file or directory's name
# is a prefix of another's.
dirpath = posixize_path(os.path.join(path, ''))
if source_path == path or source_path.startswith(dirpath):
metadata.update(meta)
return metadata
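# Illustrative example (not part of the original module): given a setting such
# as
#     EXTRA_PATH_METADATA = {'static': {'status': 'hidden'},
#                            'static/robots.txt': {'path': 'robots.txt'}}
# a file at 'static/robots.txt' first picks up the metadata of its parent
# directory 'static' and then its own, more specific entry, because the sorted
# iteration above applies the most specific matching path last.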
def parse_path_metadata(source_path, settings=None, process=None):
r"""Extract a metadata dictionary from a file's path
>>> import pprint
>>> settings = {
... 'FILENAME_METADATA': r'(?P<slug>[^.]*).*',
... 'PATH_METADATA':
... r'(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
... }
>>> reader = BaseReader(settings=settings)
>>> metadata = parse_path_metadata(
... source_path='my-cat/2013-01-01/my-slug.html',
... settings=settings,
... process=reader.process_metadata)
>>> pprint.pprint(metadata) # doctest: +ELLIPSIS
{'category': <pelican.urlwrappers.Category object at ...>,
'date': datetime.datetime(2013, 1, 1, 0, 0),
'slug': 'my-slug'}
"""
metadata = {}
dirname, basename = os.path.split(source_path)
base, ext = os.path.splitext(basename)
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [('FILENAME_METADATA', base),
('PATH_METADATA', source_path)]:
checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None):
checks.append(('(?P<category>.*)', subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
k = k.lower() # metadata must be lowercase
if v is not None and k not in metadata:
if process:
v = process(k, v)
metadata[k] = v
return metadata
|
[
"logging.getLogger",
"pelican.contents.Tag",
"re.compile",
"typogrify.filters.typogrify",
"html.escape",
"datetime.datetime",
"os.path.split",
"pelican.plugins.signals.readers_init.send",
"docutils.parsers.rst.languages.get_language",
"io.StringIO",
"os.path.relpath",
"docutils.writers.html4css1.HTMLTranslator.visit_image",
"pelican.utils.get_date",
"os.path.splitext",
"re.match",
"re.findall",
"pelican.contents.Author",
"pelican.utils.pelican_open",
"markdown.Markdown",
"os.path.join",
"os.path.basename",
"os.stat"
] |
[((1866, 1893), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1883, 1893), False, 'import logging\n'), ((23231, 23596), 're.compile', 're.compile', (['"""\n (?:\n # src before alt\n <img\n [^\\\\>]*\n src=([\'"])(.*?)\\\\1\n [^\\\\>]*\n alt=([\'"])\\\\3\n )|(?:\n # alt before src\n <img\n [^\\\\>]*\n alt=([\'"])\\\\4\n [^\\\\>]*\n src=([\'"])(.*?)\\\\5\n )\n """', 're.X'], {}), '(\n """\n (?:\n # src before alt\n <img\n [^\\\\>]*\n src=([\'"])(.*?)\\\\1\n [^\\\\>]*\n alt=([\'"])\\\\3\n )|(?:\n # alt before src\n <img\n [^\\\\>]*\n alt=([\'"])\\\\4\n [^\\\\>]*\n src=([\'"])(.*?)\\\\5\n )\n """\n , re.X)\n', (23241, 23596), False, 'import re\n'), ((23597, 23622), 're.findall', 're.findall', (['imgs', 'content'], {}), '(imgs, content)\n', (23607, 23622), False, 'import re\n'), ((26443, 26469), 'os.path.split', 'os.path.split', (['source_path'], {}), '(source_path)\n', (26456, 26469), False, 'import os\n'), ((26486, 26512), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (26502, 26512), False, 'import os\n'), ((26526, 26551), 'os.path.basename', 'os.path.basename', (['dirname'], {}), '(dirname)\n', (26542, 26551), False, 'import os\n'), ((1490, 1501), 'pelican.utils.get_date', 'get_date', (['x'], {}), '(x)\n', (1498, 1501), False, 'from pelican.utils import get_date, pelican_open, posixize_path\n'), ((5322, 5360), 'docutils.writers.html4css1.HTMLTranslator.visit_image', 'HTMLTranslator.visit_image', (['self', 'node'], {}), '(self, node)\n', (5348, 5360), False, 'from docutils.writers.html4css1 import HTMLTranslator, Writer\n'), ((6266, 6294), 'docutils.parsers.rst.languages.get_language', 'get_docutils_lang', (['lang_code'], {}), '(lang_code)\n', (6283, 6294), True, 'from docutils.parsers.rst.languages import get_language as get_docutils_lang\n'), ((11643, 11680), 'markdown.Markdown', 'Markdown', ([], {}), "(**self.settings['MARKDOWN'])\n", (11651, 11680), False, 'from markdown import Markdown\n'), ((18377, 18408), 'pelican.plugins.signals.readers_init.send', 'signals.readers_init.send', (['self'], {}), '(self)\n', (18402, 18408), False, 'from pelican.plugins import signals\n'), ((8406, 8416), 'io.StringIO', 'StringIO', ([], {}), '()\n', (8414, 8416), False, 'from io import StringIO\n'), ((11694, 11719), 'pelican.utils.pelican_open', 'pelican_open', (['source_path'], {}), '(source_path)\n', (11706, 11719), False, 'from pelican.utils import get_date, pelican_open, posixize_path\n'), ((17106, 17128), 'pelican.utils.pelican_open', 'pelican_open', (['filename'], {}), '(filename)\n', (17118, 17128), False, 'from pelican.utils import get_date, pelican_open, posixize_path\n'), ((19361, 19390), 'os.path.join', 'os.path.join', (['base_path', 'path'], {}), '(base_path, path)\n', (19373, 19390), False, 'import os\n'), ((19428, 19460), 'os.path.relpath', 'os.path.relpath', (['path', 'base_path'], {}), '(path, base_path)\n', (19443, 19460), False, 'import os\n'), ((23714, 23751), 'os.path.basename', 'os.path.basename', (['(match[1] + match[5])'], {}), '(match[1] + match[5])\n', (23730, 23751), False, 'import os\n'), ((1330, 1341), 'pelican.contents.Tag', 'Tag', (['tag', 'y'], {}), '(tag, y)\n', (1333, 1341), False, 'from pelican.contents import Author, Category, Page, Tag\n'), ((1722, 1739), 'pelican.contents.Author', 'Author', (['author', 'y'], {}), '(author, y)\n', (1728, 1739), False, 'from pelican.contents import Author, Category, Page, Tag\n'), ((14569, 14580), 'html.escape', 'escape', (['tag'], {}), '(tag)\n', (14575, 14580), False, 'from 
html import escape\n'), ((19626, 19648), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (19642, 19648), False, 'import os\n'), ((24503, 24537), 'pelican.utils.get_date', 'get_date', (["settings['DEFAULT_DATE']"], {}), "(settings['DEFAULT_DATE'])\n", (24511, 24537), False, 'from pelican.utils import get_date, pelican_open, posixize_path\n'), ((24591, 24635), 'datetime.datetime', 'datetime.datetime', (["*settings['DEFAULT_DATE']"], {}), "(*settings['DEFAULT_DATE'])\n", (24608, 24635), False, 'import datetime\n'), ((25488, 25510), 'os.path.join', 'os.path.join', (['path', '""""""'], {}), "(path, '')\n", (25500, 25510), False, 'import os\n'), ((26967, 26989), 're.match', 're.match', (['regexp', 'data'], {}), '(regexp, data)\n', (26975, 26989), False, 'import re\n'), ((14645, 14654), 'html.escape', 'escape', (['k'], {}), '(k)\n', (14651, 14654), False, 'from html import escape\n'), ((22097, 22152), 'typogrify.filters.typogrify', 'typogrify', (['text', "self.settings['TYPOGRIFY_IGNORE_TAGS']"], {}), "(text, self.settings['TYPOGRIFY_IGNORE_TAGS'])\n", (22106, 22152), False, 'from typogrify.filters import typogrify\n'), ((24886, 24904), 'os.stat', 'os.stat', (['full_path'], {}), '(full_path)\n', (24893, 24904), False, 'import os\n'), ((22263, 22278), 'typogrify.filters.typogrify', 'typogrify', (['text'], {}), '(text)\n', (22272, 22278), False, 'from typogrify.filters import typogrify\n'), ((14921, 14943), 'html.escape', 'escape', (['v'], {'quote': '(False)'}), '(v, quote=False)\n', (14927, 14943), False, 'from html import escape\n'), ((15020, 15042), 'html.escape', 'escape', (['v'], {'quote': '(False)'}), '(v, quote=False)\n', (15026, 15042), False, 'from html import escape\n'), ((13859, 13870), 'html.escape', 'escape', (['tag'], {}), '(tag)\n', (13865, 13870), False, 'from html import escape\n')]
|
# coding: utf-8
from __future__ import division, print_function
# Standard library
import time
# Third-party
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import derivative
from astropy.extern.six.moves import cPickle as pickle
import pytest
# Project
from ..io import load
from ..core import CompositePotential
from ....units import UnitSystem, DimensionlessUnitSystem
from ....dynamics import PhaseSpacePosition
from ....integrate import LeapfrogIntegrator
def partial_derivative(func, point, dim_ix=0, **kwargs):
xyz = np.array(point, copy=True)
def wraps(a):
xyz[dim_ix] = a
return func(xyz)
return derivative(wraps, point[dim_ix], **kwargs)
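# Illustrative usage (not part of the test suite): the numerical partial
# derivative of f(x, y, z) = x**2 + y with respect to y at (1., 2., 3.) should
# be close to 1:
#     partial_derivative(lambda p: p[0]**2 + p[1], [1., 2., 3.], dim_ix=1, dx=1e-3)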
class PotentialTestBase(object):
name = None
potential = None # MUST SET THIS
tol = 1E-5
show_plots = False
@classmethod
def setup_class(cls):
if cls.name is None:
cls.name = cls.__name__[4:] # remove Test
print("Testing potential: {}".format(cls.name))
cls.w0 = np.array(cls.w0)
cls.ndim = cls.w0.size // 2
# TODO: need to test also quantity objects and phasespacepositions!
# these are arrays we will test the methods on:
w0_2d = np.repeat(cls.w0[:,None], axis=1, repeats=16)
w0_3d = np.repeat(w0_2d[...,None], axis=2, repeats=8)
w0_list = list(cls.w0)
w0_slice = w0_2d[:,:4]
cls.w0s = [cls.w0, w0_2d, w0_3d, w0_list, w0_slice]
cls._grad_return_shapes = [cls.w0[:cls.ndim].shape + (1,),
w0_2d[:cls.ndim].shape,
w0_3d[:cls.ndim].shape,
cls.w0[:cls.ndim].shape + (1,),
w0_slice[:cls.ndim].shape]
cls._hess_return_shapes = [(cls.ndim,) + cls.w0[:cls.ndim].shape + (1,),
(cls.ndim,) + w0_2d[:cls.ndim].shape,
(cls.ndim,) + w0_3d[:cls.ndim].shape,
(cls.ndim,) + cls.w0[:cls.ndim].shape + (1,),
(cls.ndim,) + w0_slice[:cls.ndim].shape]
cls._valu_return_shapes = [x[1:] for x in cls._grad_return_shapes]
def test_unitsystem(self):
assert isinstance(self.potential.units, UnitSystem)
def test_energy(self):
assert self.ndim == self.potential.ndim
for arr,shp in zip(self.w0s, self._valu_return_shapes):
v = self.potential.energy(arr[:self.ndim])
assert v.shape == shp
g = self.potential.energy(arr[:self.ndim], t=0.1)
g = self.potential.energy(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.energy(arr[:self.ndim], t=t)
g = self.potential.energy(arr[:self.ndim], t=t*self.potential.units['time'])
def test_gradient(self):
for arr,shp in zip(self.w0s, self._grad_return_shapes):
g = self.potential.gradient(arr[:self.ndim])
assert g.shape == shp
g = self.potential.gradient(arr[:self.ndim], t=0.1)
g = self.potential.gradient(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.gradient(arr[:self.ndim], t=t)
g = self.potential.gradient(arr[:self.ndim], t=t*self.potential.units['time'])
def test_hessian(self):
for arr,shp in zip(self.w0s, self._hess_return_shapes):
g = self.potential.hessian(arr[:self.ndim])
assert g.shape == shp
g = self.potential.hessian(arr[:self.ndim], t=0.1)
g = self.potential.hessian(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.hessian(arr[:self.ndim], t=t)
g = self.potential.hessian(arr[:self.ndim], t=t*self.potential.units['time'])
def test_mass_enclosed(self):
for arr,shp in zip(self.w0s, self._valu_return_shapes):
g = self.potential.mass_enclosed(arr[:self.ndim])
assert g.shape == shp
assert np.all(g > 0.)
g = self.potential.mass_enclosed(arr[:self.ndim], t=0.1)
g = self.potential.mass_enclosed(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.mass_enclosed(arr[:self.ndim], t=t)
g = self.potential.mass_enclosed(arr[:self.ndim], t=t*self.potential.units['time'])
def test_circular_velocity(self):
for arr,shp in zip(self.w0s, self._valu_return_shapes):
g = self.potential.circular_velocity(arr[:self.ndim])
assert g.shape == shp
assert np.all(g > 0.)
g = self.potential.circular_velocity(arr[:self.ndim], t=0.1)
g = self.potential.circular_velocity(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.circular_velocity(arr[:self.ndim], t=t)
g = self.potential.circular_velocity(arr[:self.ndim], t=t*self.potential.units['time'])
def test_repr(self):
pot_repr = repr(self.potential)
if isinstance(self.potential.units, DimensionlessUnitSystem):
assert "dimensionless" in pot_repr
else:
assert str(self.potential.units['length']) in pot_repr
assert str(self.potential.units['time']) in pot_repr
assert str(self.potential.units['mass']) in pot_repr
for k in self.potential.parameters.keys():
assert "{}=".format(k) in pot_repr
def test_compare(self):
# skip if composite potentials
if len(self.potential.parameters) == 0:
return
other = self.potential.__class__(units=self.potential.units,
**self.potential.parameters)
assert other == self.potential
pars = self.potential.parameters.copy()
for k in pars.keys():
if k != 0:
pars[k] = 1.1*pars[k]
other = self.potential.__class__(units=self.potential.units, **pars)
assert other != self.potential
# check that comparing to non-potentials works
assert not self.potential == "sup"
assert not self.potential == None
def test_plot(self):
p = self.potential
if self.show_plots:
f = p.plot_contours(grid=(np.linspace(-10., 10., 100), 0., 0.),
labels=["X"])
# f.suptitle("slice off from 0., won't have cusp")
# f.savefig(os.path.join(plot_path, "contour_x.png"))
f = p.plot_contours(grid=(np.linspace(-10., 10., 100),
np.linspace(-10., 10., 100),
0.),
cmap='Blues')
# f.savefig(os.path.join(plot_path, "contour_xy.png"))
f = p.plot_contours(grid=(np.linspace(-10., 10., 100),
1.,
np.linspace(-10., 10., 100)),
cmap='Blues', labels=["X", "Z"])
# f.savefig(os.path.join(plot_path, "contour_xz.png"))
plt.show()
plt.close('all')
def test_save_load(self, tmpdir):
"""
Test writing to a YAML file, and reading back in
"""
fn = str(tmpdir.join("{}.yml".format(self.name)))
self.potential.save(fn)
p = load(fn)
p.energy(self.w0[:self.w0.size//2])
p.gradient(self.w0[:self.w0.size//2])
def test_numerical_gradient_vs_gradient(self):
"""
Check that the value of the implemented gradient function is close to a
numerically estimated value. This is to check the coded-up version.
"""
dx = 1E-3 * np.sqrt(np.sum(self.w0[:self.w0.size//2]**2))
max_x = np.sqrt(np.sum([x**2 for x in self.w0[:self.w0.size//2]]))
grid = np.linspace(-max_x,max_x,8)
grid = grid[grid != 0.]
grids = [grid for i in range(self.w0.size//2)]
xyz = np.ascontiguousarray(np.vstack(map(np.ravel, np.meshgrid(*grids))).T)
def energy_wrap(xyz):
xyz = np.ascontiguousarray(xyz[None])
return self.potential._energy(xyz, t=np.array([0.]))[0]
num_grad = np.zeros_like(xyz)
for i in range(xyz.shape[0]):
num_grad[i] = np.squeeze([partial_derivative(energy_wrap, xyz[i], dim_ix=dim_ix, n=1, dx=dx, order=5)
for dim_ix in range(self.w0.size//2)])
grad = self.potential._gradient(xyz, t=np.array([0.]))
assert np.allclose(num_grad, grad, rtol=self.tol)
def test_orbit_integration(self):
"""
        Make sure we can integrate an orbit in this potential
"""
w0 = self.w0
w0 = np.vstack((w0,w0,w0)).T
t1 = time.time()
orbit = self.potential.integrate_orbit(w0, dt=1., n_steps=10000,
Integrator=LeapfrogIntegrator)
print("Integration time (10000 steps): {}".format(time.time() - t1))
if self.show_plots:
f = orbit.plot()
f.suptitle("Vector w0")
plt.show()
plt.close(f)
us = self.potential.units
w0 = PhaseSpacePosition(pos=w0[:self.ndim]*us['length'],
vel=w0[self.ndim:]*us['length']/us['time'])
orbit = self.potential.integrate_orbit(w0, dt=1., n_steps=10000,
Integrator=LeapfrogIntegrator)
if self.show_plots:
f = orbit.plot()
f.suptitle("Object w0")
plt.show()
plt.close(f)
def test_pickle(self, tmpdir):
fn = str(tmpdir.join("{}.pickle".format(self.name)))
with open(fn, "wb") as f:
pickle.dump(self.potential, f)
with open(fn, "rb") as f:
p = pickle.load(f)
p.energy(self.w0[:self.w0.size//2])
class CompositePotentialTestBase(PotentialTestBase):
@pytest.mark.skip(reason="Skip composite potential repr test")
def test_repr(self):
pass
@pytest.mark.skip(reason="Skip composite potential compare test")
def test_compare(self):
pass
|
[
"numpy.allclose",
"numpy.repeat",
"astropy.extern.six.moves.cPickle.dump",
"pytest.mark.skip",
"numpy.ascontiguousarray",
"scipy.misc.derivative",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.close",
"numpy.sum",
"numpy.vstack",
"time.time",
"numpy.meshgrid",
"numpy.all",
"astropy.extern.six.moves.cPickle.load",
"numpy.zeros_like",
"matplotlib.pyplot.show"
] |
[((553, 579), 'numpy.array', 'np.array', (['point'], {'copy': '(True)'}), '(point, copy=True)\n', (561, 579), True, 'import numpy as np\n'), ((658, 700), 'scipy.misc.derivative', 'derivative', (['wraps', 'point[dim_ix]'], {}), '(wraps, point[dim_ix], **kwargs)\n', (668, 700), False, 'from scipy.misc import derivative\n'), ((10299, 10360), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Skip composite potential repr test"""'}), "(reason='Skip composite potential repr test')\n", (10315, 10360), False, 'import pytest\n'), ((10405, 10469), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Skip composite potential compare test"""'}), "(reason='Skip composite potential compare test')\n", (10421, 10469), False, 'import pytest\n'), ((1026, 1042), 'numpy.array', 'np.array', (['cls.w0'], {}), '(cls.w0)\n', (1034, 1042), True, 'import numpy as np\n'), ((1229, 1275), 'numpy.repeat', 'np.repeat', (['cls.w0[:, None]'], {'axis': '(1)', 'repeats': '(16)'}), '(cls.w0[:, None], axis=1, repeats=16)\n', (1238, 1275), True, 'import numpy as np\n'), ((1291, 1337), 'numpy.repeat', 'np.repeat', (['w0_2d[..., None]'], {'axis': '(2)', 'repeats': '(8)'}), '(w0_2d[..., None], axis=2, repeats=8)\n', (1300, 1337), True, 'import numpy as np\n'), ((8174, 8203), 'numpy.linspace', 'np.linspace', (['(-max_x)', 'max_x', '(8)'], {}), '(-max_x, max_x, 8)\n', (8185, 8203), True, 'import numpy as np\n'), ((8542, 8560), 'numpy.zeros_like', 'np.zeros_like', (['xyz'], {}), '(xyz)\n', (8555, 8560), True, 'import numpy as np\n'), ((8869, 8911), 'numpy.allclose', 'np.allclose', (['num_grad', 'grad'], {'rtol': 'self.tol'}), '(num_grad, grad, rtol=self.tol)\n', (8880, 8911), True, 'import numpy as np\n'), ((9104, 9115), 'time.time', 'time.time', ([], {}), '()\n', (9113, 9115), False, 'import time\n'), ((4228, 4243), 'numpy.all', 'np.all', (['(g > 0.0)'], {}), '(g > 0.0)\n', (4234, 4243), True, 'import numpy as np\n'), ((4853, 4868), 'numpy.all', 'np.all', (['(g > 0.0)'], {}), '(g > 0.0)\n', (4859, 4868), True, 'import numpy as np\n'), ((7423, 7433), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7431, 7433), True, 'import matplotlib.pyplot as plt\n'), ((7446, 7462), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7455, 7462), True, 'import matplotlib.pyplot as plt\n'), ((8107, 8162), 'numpy.sum', 'np.sum', (['[(x ** 2) for x in self.w0[:self.w0.size // 2]]'], {}), '([(x ** 2) for x in self.w0[:self.w0.size // 2]])\n', (8113, 8162), True, 'import numpy as np\n'), ((8422, 8453), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['xyz[None]'], {}), '(xyz[None])\n', (8442, 8453), True, 'import numpy as np\n'), ((9066, 9089), 'numpy.vstack', 'np.vstack', (['(w0, w0, w0)'], {}), '((w0, w0, w0))\n', (9075, 9089), True, 'import numpy as np\n'), ((9450, 9460), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9458, 9460), True, 'import matplotlib.pyplot as plt\n'), ((9473, 9485), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (9482, 9485), True, 'import matplotlib.pyplot as plt\n'), ((9919, 9929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9927, 9929), True, 'import matplotlib.pyplot as plt\n'), ((9942, 9954), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (9951, 9954), True, 'import matplotlib.pyplot as plt\n'), ((10098, 10128), 'astropy.extern.six.moves.cPickle.dump', 'pickle.dump', (['self.potential', 'f'], {}), '(self.potential, f)\n', (10109, 10128), True, 'from astropy.extern.six.moves import cPickle as pickle\n'), ((10180, 
10194), 'astropy.extern.six.moves.cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10191, 10194), True, 'from astropy.extern.six.moves import cPickle as pickle\n'), ((8045, 8085), 'numpy.sum', 'np.sum', (['(self.w0[:self.w0.size // 2] ** 2)'], {}), '(self.w0[:self.w0.size // 2] ** 2)\n', (8051, 8085), True, 'import numpy as np\n'), ((8837, 8852), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (8845, 8852), True, 'import numpy as np\n'), ((9325, 9336), 'time.time', 'time.time', ([], {}), '()\n', (9334, 9336), False, 'import time\n'), ((6596, 6625), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(100)'], {}), '(-10.0, 10.0, 100)\n', (6607, 6625), True, 'import numpy as np\n'), ((6848, 6877), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(100)'], {}), '(-10.0, 10.0, 100)\n', (6859, 6877), True, 'import numpy as np\n'), ((6915, 6944), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(100)'], {}), '(-10.0, 10.0, 100)\n', (6926, 6944), True, 'import numpy as np\n'), ((7139, 7168), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(100)'], {}), '(-10.0, 10.0, 100)\n', (7150, 7168), True, 'import numpy as np\n'), ((7248, 7277), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(100)'], {}), '(-10.0, 10.0, 100)\n', (7259, 7277), True, 'import numpy as np\n'), ((8348, 8367), 'numpy.meshgrid', 'np.meshgrid', (['*grids'], {}), '(*grids)\n', (8359, 8367), True, 'import numpy as np\n'), ((8503, 8518), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (8511, 8518), True, 'import numpy as np\n'), ((2734, 2747), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (2742, 2747), True, 'import numpy as np\n'), ((3283, 3296), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (3291, 3296), True, 'import numpy as np\n'), ((3832, 3845), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (3840, 3845), True, 'import numpy as np\n'), ((4437, 4450), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (4445, 4450), True, 'import numpy as np\n'), ((5070, 5083), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (5078, 5083), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
import json
import time
import pandas as pd
import tensorflow as tf
import numpy as np
import math
from decimal import Decimal
import matplotlib.pyplot as plt
from agents.ornstein_uhlenbeck import OrnsteinUhlenbeckActionNoise
eps=10e-8
epochs=0
M=0
class StockTrader():
def __init__(self):
self.reset()
def reset(self):
self.wealth = 10e3
self.total_reward = 0
self.ep_ave_max_q = 0
self.loss = 0
self.actor_loss=0
self.wealth_history = []
self.r_history = []
self.w_history = []
self.p_history = []
self.noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(M))
def update_summary(self,loss,r,q_value,actor_loss,w,p):
self.loss += loss
self.actor_loss+=actor_loss
self.total_reward+=r
self.ep_ave_max_q += q_value
self.r_history.append(r)
self.wealth = self.wealth * math.exp(r)
self.wealth_history.append(self.wealth)
self.w_history.extend([','.join([str(Decimal(str(w0)).quantize(Decimal('0.00'))) for w0 in w.tolist()[0]])])
self.p_history.extend([','.join([str(Decimal(str(p0)).quantize(Decimal('0.000'))) for p0 in p.tolist()])])
def write(self,epoch):
wealth_history = pd.Series(self.wealth_history)
r_history = pd.Series(self.r_history)
w_history = pd.Series(self.w_history)
p_history = pd.Series(self.p_history)
history = pd.concat([wealth_history, r_history, w_history, p_history], axis=1)
history.to_csv('result' + str(epoch) + '-' + str(math.exp(np.sum(self.r_history)) * 100) + '.csv')
def print_result(self,epoch,agent):
self.total_reward=math.exp(self.total_reward) * 100
print('*-----Episode: {:d}, Reward:{:.6f}%, ep_ave_max_q:{:.2f}, actor_loss:{:2f}-----*'.format(epoch, self.total_reward,self.ep_ave_max_q,self.actor_loss))
agent.write_summary(self.loss, self.total_reward,self.ep_ave_max_q,self.actor_loss, epoch)
agent.save_model()
def plot_result(self):
pd.Series(self.wealth_history).plot()
plt.show()
def action_processor(self,a,ratio):
a = np.clip(a + self.noise() * ratio, 0, 1)
a = a / (a.sum() + eps)
return a
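    # Note (illustrative, not in the original file): action_processor perturbs
    # the predicted portfolio weights with Ornstein-Uhlenbeck noise whose scale
    # decays over training (the caller passes ratio=(epochs-epoch)/epochs),
    # clips to [0, 1] and renormalises so the weights sum to roughly one.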
def parse_info(info):
    return info['reward'], info['continue'], info['next state'], info['weight vector'], info['price'], info['risk']
def traversal(stocktrader,agent,env,epoch,noise_flag,framework,method,trainable):
info = env.step(None,None)
r,contin,s,w1,p,risk=parse_info(info)
contin=1
t=0
while contin:
w2 = agent.predict(s,w1)
if noise_flag=='True':
w2=stocktrader.action_processor(w2,(epochs-epoch)/epochs)
env_info = env.step(w1, w2)
r, contin, s_next, w1, p,risk = parse_info(env_info)
if framework=='PG':
agent.save_transition(s,p,w2,w1)
else:
agent.save_transition(s, w2, r-risk, contin, s_next, w1)
loss, q_value,actor_loss=0,0,0
if framework=='DDPG':
if not contin and trainable=="True":
agent_info= agent.train(method,epoch)
loss, q_value=agent_info["critic_loss"],agent_info["q_value"]
if method=='model_based':
actor_loss=agent_info["actor_loss"]
elif framework=='PPO':
if not contin and trainable=="True":
agent_info = agent.train(method, epoch)
loss, q_value = agent_info["critic_loss"], agent_info["q_value"]
if method=='model_based':
actor_loss=agent_info["actor_loss"]
elif framework=='PG':
if not contin and trainable=="True":
agent.train()
stocktrader.update_summary(loss,r,q_value,actor_loss,w2,p)
s = s_next
t=t+1
def backtest(agent,env):
print("starting to backtest......")
from agents.UCRP import UCRP
from agents.Winner import WINNER
from agents.Losser import LOSSER
agents=[]
agents.append(agent)
agents.append(WINNER())
agents.append(UCRP())
agents.append(LOSSER())
labels=['PG','Winner','UCRP','Losser']
wealths_result=[]
rs_result=[]
for i,agent in enumerate(agents):
info = env.step(None, None)
r, contin, s, w1, p, risk = parse_info(info)
contin = 1
wealth=10000
wealths = [wealth]
rs=[1]
while contin:
w2 = agent.predict(s, w1)
if i==0:
print(w2)
env_info = env.step(w1, w2)
r, contin, s_next, w1, p, risk = parse_info(env_info)
wealth=wealth*math.exp(r)
rs.append(math.exp(r)-1)
wealths.append(wealth)
s=s_next
print('finish one agent')
wealths_result.append(wealths)
rs_result.append(rs)
for i in range(len(agents)):
plt.plot(wealths_result[i],label=labels[i])
print(labels[i],' ',np.mean(rs_result[i]),' ',np.std(rs_result[i]))
plt.legend()
plt.show()
def parse_config(config,mode):
codes = config["session"]["codes"]
start_date = config["session"]["start_date"]
end_date = config["session"]["end_date"]
features = config["session"]["features"]
agent_config = config["session"]["agents"]
market = config["session"]["market_types"]
noise_flag, record_flag, plot_flag=config["session"]["noise_flag"],config["session"]["record_flag"],config["session"]["plot_flag"]
predictor, framework, window_length = agent_config
reload_flag, trainable=config["session"]['reload_flag'],config["session"]['trainable']
method=config["session"]['method']
global epochs
epochs = int(config["session"]["epochs"])
if mode=='test':
record_flag='True'
noise_flag='False'
plot_flag='True'
reload_flag='True'
trainable='False'
method='model_free'
print("*--------------------Training Status-------------------*")
print('Codes:',codes)
print("Date from",start_date,' to ',end_date)
print('Features:',features)
print("Agent:Noise(",noise_flag,')---Recoed(',noise_flag,')---Plot(',plot_flag,')')
print("Market Type:",market)
print("Predictor:",predictor," Framework:", framework," Window_length:",window_length)
print("Epochs:",epochs)
print("Trainable:",trainable)
print("Reloaded Model:",reload_flag)
print("Method",method)
print("Noise_flag",noise_flag)
print("Record_flag",record_flag)
print("Plot_flag",plot_flag)
return codes,start_date,end_date,features,agent_config,market,predictor, framework, window_length,noise_flag, record_flag, plot_flag,reload_flag,trainable,method
def session(config,mode):
from data.environment import Environment
codes, start_date, end_date, features, agent_config, market,predictor, framework, window_length,noise_flag, record_flag, plot_flag,reload_flag,trainable,method=parse_config(config,mode)
env = Environment(start_date, end_date, codes, features, int(window_length),market)
global M
M=len(codes)+1
if framework == 'DDPG':
print("*-----------------Loading DDPG Agent---------------------*")
from agents.ddpg import DDPG
agent = DDPG(predictor, len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
elif framework == 'PPO':
print("*-----------------Loading PPO Agent---------------------*")
from agents.ppo import PPO
agent = PPO(predictor, len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
elif framework == 'PG':
print("*-----------------Loading PG Agent---------------------*")
from agents.pg import PG
agent = PG(len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
stocktrader=StockTrader()
if mode=='train':
print("Training with {:d}".format(epochs))
for epoch in range(epochs):
print("Now we are at epoch", epoch)
traversal(stocktrader,agent,env,epoch,noise_flag,framework,method,trainable)
if record_flag=='True':
stocktrader.write(epoch)
if plot_flag=='True':
stocktrader.plot_result()
agent.reset_buffer()
stocktrader.print_result(epoch,agent)
stocktrader.reset()
elif mode=='test':
backtest(agent, env)
def build_parser():
parser = ArgumentParser(description='Provide arguments for training different DDPG or PPO models in Portfolio Management')
parser.add_argument("--mode",dest="mode",help="download(China), train, test",metavar="MODE", default="train",required=True)
parser.add_argument("--model",dest="model",help="DDPG,PPO",metavar="MODEL", default="DDPG",required=False)
return parser
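# Example invocation (illustrative; the actual file name of this script is not
# fixed here):
#     python <this_script>.py --mode train --model DDPG
# --mode is required and accepts download, train or test; --model is optional
# and defaults to DDPG.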
def main():
parser = build_parser()
args=vars(parser.parse_args())
with open('config.json') as f:
config=json.load(f)
if args['mode']=='download':
from data.download_data import DataDownloader
data_downloader=DataDownloader(config)
data_downloader.save_data()
else:
session(config,args['mode'])
if __name__=="__main__":
main()
|
[
"pandas.Series",
"numpy.mean",
"data.download_data.DataDownloader",
"argparse.ArgumentParser",
"decimal.Decimal",
"agents.Winner.WINNER",
"matplotlib.pyplot.plot",
"numpy.sum",
"agents.UCRP.UCRP",
"numpy.zeros",
"numpy.std",
"json.load",
"math.exp",
"agents.Losser.LOSSER",
"pandas.concat",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((5269, 5281), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5279, 5281), True, 'import matplotlib.pyplot as plt\n'), ((5287, 5297), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5295, 5297), True, 'import matplotlib.pyplot as plt\n'), ((8870, 8993), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Provide arguments for training different DDPG or PPO models in Portfolio Management"""'}), "(description=\n 'Provide arguments for training different DDPG or PPO models in Portfolio Management'\n )\n", (8884, 8993), False, 'from argparse import ArgumentParser\n'), ((1368, 1398), 'pandas.Series', 'pd.Series', (['self.wealth_history'], {}), '(self.wealth_history)\n', (1377, 1398), True, 'import pandas as pd\n'), ((1420, 1445), 'pandas.Series', 'pd.Series', (['self.r_history'], {}), '(self.r_history)\n', (1429, 1445), True, 'import pandas as pd\n'), ((1467, 1492), 'pandas.Series', 'pd.Series', (['self.w_history'], {}), '(self.w_history)\n', (1476, 1492), True, 'import pandas as pd\n'), ((1514, 1539), 'pandas.Series', 'pd.Series', (['self.p_history'], {}), '(self.p_history)\n', (1523, 1539), True, 'import pandas as pd\n'), ((1559, 1627), 'pandas.concat', 'pd.concat', (['[wealth_history, r_history, w_history, p_history]'], {'axis': '(1)'}), '([wealth_history, r_history, w_history, p_history], axis=1)\n', (1568, 1627), True, 'import pandas as pd\n'), ((2221, 2231), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2229, 2231), True, 'import matplotlib.pyplot as plt\n'), ((4266, 4274), 'agents.Winner.WINNER', 'WINNER', ([], {}), '()\n', (4272, 4274), False, 'from agents.Winner import WINNER\n'), ((4295, 4301), 'agents.UCRP.UCRP', 'UCRP', ([], {}), '()\n', (4299, 4301), False, 'from agents.UCRP import UCRP\n'), ((4322, 4330), 'agents.Losser.LOSSER', 'LOSSER', ([], {}), '()\n', (4328, 4330), False, 'from agents.Losser import LOSSER\n'), ((5139, 5183), 'matplotlib.pyplot.plot', 'plt.plot', (['wealths_result[i]'], {'label': 'labels[i]'}), '(wealths_result[i], label=labels[i])\n', (5147, 5183), True, 'import matplotlib.pyplot as plt\n'), ((9378, 9390), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9387, 9390), False, 'import json\n'), ((1017, 1028), 'math.exp', 'math.exp', (['r'], {}), '(r)\n', (1025, 1028), False, 'import math\n'), ((1806, 1833), 'math.exp', 'math.exp', (['self.total_reward'], {}), '(self.total_reward)\n', (1814, 1833), False, 'import math\n'), ((5214, 5235), 'numpy.mean', 'np.mean', (['rs_result[i]'], {}), '(rs_result[i])\n', (5221, 5235), True, 'import numpy as np\n'), ((5242, 5262), 'numpy.std', 'np.std', (['rs_result[i]'], {}), '(rs_result[i])\n', (5248, 5262), True, 'import numpy as np\n'), ((9517, 9539), 'data.download_data.DataDownloader', 'DataDownloader', (['config'], {}), '(config)\n', (9531, 9539), False, 'from data.download_data import DataDownloader\n'), ((738, 749), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (746, 749), True, 'import numpy as np\n'), ((2174, 2204), 'pandas.Series', 'pd.Series', (['self.wealth_history'], {}), '(self.wealth_history)\n', (2183, 2204), True, 'import pandas as pd\n'), ((4881, 4892), 'math.exp', 'math.exp', (['r'], {}), '(r)\n', (4889, 4892), False, 'import math\n'), ((4916, 4927), 'math.exp', 'math.exp', (['r'], {}), '(r)\n', (4924, 4927), False, 'import math\n'), ((1150, 1165), 'decimal.Decimal', 'Decimal', (['"""0.00"""'], {}), "('0.00')\n", (1157, 1165), False, 'from decimal import Decimal\n'), ((1268, 1284), 'decimal.Decimal', 'Decimal', (['"""0.000"""'], {}), "('0.000')\n", 
(1275, 1284), False, 'from decimal import Decimal\n'), ((1695, 1717), 'numpy.sum', 'np.sum', (['self.r_history'], {}), '(self.r_history)\n', (1701, 1717), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2016-03-16 11:28:27
# @Last Modified by: oesteban
# @Last Modified time: 2016-04-04 13:50:50
"""
Batch export freesurfer results to animated gifs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path as op
import subprocess as sp
from shutil import rmtree
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from tempfile import mkdtemp
from errno import EEXIST
import glob
from six import string_types
import numpy as np
import nibabel as nb
from skimage import exposure
def main():
"""Entry point"""
parser = ArgumentParser(description='Batch export freesurfer results to animated gifs',
formatter_class=RawTextHelpFormatter)
g_input = parser.add_argument_group('Inputs')
g_input.add_argument('-S', '--subjects-dir', action='store', default=os.getcwd())
g_input.add_argument('-s', '--subject-id', action='store')
g_input.add_argument('-t', '--temp-dir', action='store')
g_input.add_argument('--keep-temp', action='store_true', default=False)
g_input.add_argument('--zoom', action='store_true', default=False)
g_input.add_argument('--hist-eq', action='store_true', default=False)
g_outputs = parser.add_argument_group('Outputs')
g_outputs.add_argument('-o', '--output-dir', action='store', default='fs2gif')
opts = parser.parse_args()
if opts.temp_dir is None:
tmpdir = mkdtemp()
else:
tmpdir = op.abspath(opts.temp_dir)
try:
os.makedirs(tmpdir)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
out_dir = op.abspath(opts.output_dir)
try:
os.makedirs(out_dir)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
subjects_dir = op.abspath(opts.subjects_dir)
subject_list = opts.subject_id
if subject_list is None:
subject_list = [name for name in os.listdir(subjects_dir)
if op.isdir(os.path.join(subjects_dir, name))]
elif isinstance(subject_list, string_types):
if '*' not in subject_list:
subject_list = [subject_list]
else:
all_dirs = [op.join(subjects_dir, name) for name in os.listdir(subjects_dir)
if op.isdir(os.path.join(subjects_dir, name))]
pattern = glob.glob(op.abspath(op.join(subjects_dir, opts.subject_id)))
subject_list = list(set(pattern).intersection(set(all_dirs)))
environ = os.environ.copy()
environ['SUBJECTS_DIR'] = subjects_dir
# tcl_file = pkgr.resource_filename('structural_dhcp_mriqc', 'data/fsexport.tcl')
tcl_contents = """
SetOrientation 0
SetCursor 0 128 128 128
SetDisplayFlag 3 0
SetDisplayFlag 22 1
set i 0
"""
for sub_path in subject_list:
subid = op.basename(sub_path)
tmp_sub = op.join(tmpdir, subid)
try:
os.makedirs(tmp_sub)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
niifile = op.join(tmp_sub, '%s.nii.gz') % subid
ref_file = op.join(sub_path, 'mri', 'T1.mgz')
sp.call(['mri_convert', op.join(sub_path, 'mri', 'norm.mgz'), niifile],
cwd=tmp_sub)
data = nb.load(niifile).get_data()
data[data > 0] = 1
# Compute brain bounding box
indexes = np.argwhere(data)
bbox_min = indexes.min(0)
bbox_max = indexes.max(0) + 1
center = np.average([bbox_min, bbox_max], axis=0)
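        # Note (illustrative, not in the original script): np.argwhere on the
        # binarised volume returns the coordinates of all non-zero voxels, so
        # bbox_min/bbox_max delimit the smallest box containing the brain and
        # `center` is its midpoint, used further down as the zoom centre.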
if opts.hist_eq:
modnii = op.join(tmp_sub, '%s.nii.gz' % subid)
ref_file = op.join(tmp_sub, '%s.mgz' % subid)
img = nb.load(niifile)
data = exposure.equalize_adapthist(img.get_data(), clip_limit=0.03)
nb.Nifti1Image(data, img.get_affine(), img.get_header()).to_filename(modnii)
sp.call(['mri_convert', modnii, ref_file], cwd=tmp_sub)
if not opts.zoom:
# Export tiffs for left hemisphere
tcl_file = op.join(tmp_sub, '%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-' % (tmp_sub, subid) + '%03d.tif" $i]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'T1.mgz', 'lh.pial', '-aux-surface', 'rh.pial', '-tcl', tcl_file], env=environ)
# Convert to animated gif
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-*.tif' % (tmp_sub, subid),
'%s/%s.gif' % (out_dir, subid)])
else:
# Export tiffs for left hemisphere
tcl_file = op.join(tmp_sub, 'lh-%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('SetZoomLevel 2')
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetZoomCenter %d %d $slice\n' % (center[0] + 30, center[1] - 10))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-lh-' % (tmp_sub, subid) + '%03d.tif" $i]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'norm.mgz', 'lh.white', '-tcl', tcl_file], env=environ)
# Export tiffs for right hemisphere
tcl_file = op.join(tmp_sub, 'rh-%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('SetZoomLevel 2')
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetZoomCenter %d %d $slice\n' % (center[0] - 30, center[1] - 10))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-rh-' % (tmp_sub, subid) + '%03d.tif" $slice]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'norm.mgz', 'rh.white', '-tcl', tcl_file], env=environ)
# Convert to animated gif
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-lh-*.tif' % (tmp_sub, subid),
'%s/%s-lh.gif' % (out_dir, subid)])
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-rh-*.tif' % (tmp_sub, subid),
'%s/%s-rh.gif' % (out_dir, subid)])
if not opts.keep_temp:
try:
rmtree(tmp_sub)
except:
pass
if __name__ == '__main__':
main()
|
[
"os.listdir",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.average",
"nibabel.load",
"os.path.join",
"os.environ.copy",
"os.getcwd",
"numpy.argwhere",
"tempfile.mkdtemp",
"os.path.basename",
"subprocess.call",
"shutil.rmtree",
"os.path.abspath"
] |
[((752, 878), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Batch export freesurfer results to animated gifs"""', 'formatter_class': 'RawTextHelpFormatter'}), "(description=\n 'Batch export freesurfer results to animated gifs', formatter_class=\n RawTextHelpFormatter)\n", (766, 878), False, 'from argparse import ArgumentParser\n'), ((1810, 1837), 'os.path.abspath', 'op.abspath', (['opts.output_dir'], {}), '(opts.output_dir)\n', (1820, 1837), True, 'import os.path as op\n'), ((1977, 2006), 'os.path.abspath', 'op.abspath', (['opts.subjects_dir'], {}), '(opts.subjects_dir)\n', (1987, 2006), True, 'import os.path as op\n'), ((2682, 2699), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (2697, 2699), False, 'import os\n'), ((1594, 1603), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (1601, 1603), False, 'from tempfile import mkdtemp\n'), ((1631, 1656), 'os.path.abspath', 'op.abspath', (['opts.temp_dir'], {}), '(opts.temp_dir)\n', (1641, 1656), True, 'import os.path as op\n'), ((1855, 1875), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (1866, 1875), False, 'import os\n'), ((2995, 3016), 'os.path.basename', 'op.basename', (['sub_path'], {}), '(sub_path)\n', (3006, 3016), True, 'import os.path as op\n'), ((3035, 3057), 'os.path.join', 'op.join', (['tmpdir', 'subid'], {}), '(tmpdir, subid)\n', (3042, 3057), True, 'import os.path as op\n'), ((3273, 3307), 'os.path.join', 'op.join', (['sub_path', '"""mri"""', '"""T1.mgz"""'], {}), "(sub_path, 'mri', 'T1.mgz')\n", (3280, 3307), True, 'import os.path as op\n'), ((3543, 3560), 'numpy.argwhere', 'np.argwhere', (['data'], {}), '(data)\n', (3554, 3560), True, 'import numpy as np\n'), ((3650, 3690), 'numpy.average', 'np.average', (['[bbox_min, bbox_max]'], {'axis': '(0)'}), '([bbox_min, bbox_max], axis=0)\n', (3660, 3690), True, 'import numpy as np\n'), ((1020, 1031), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1029, 1031), False, 'import os\n'), ((1682, 1701), 'os.makedirs', 'os.makedirs', (['tmpdir'], {}), '(tmpdir)\n', (1693, 1701), False, 'import os\n'), ((3083, 3103), 'os.makedirs', 'os.makedirs', (['tmp_sub'], {}), '(tmp_sub)\n', (3094, 3103), False, 'import os\n'), ((3216, 3245), 'os.path.join', 'op.join', (['tmp_sub', '"""%s.nii.gz"""'], {}), "(tmp_sub, '%s.nii.gz')\n", (3223, 3245), True, 'import os.path as op\n'), ((3738, 3775), 'os.path.join', 'op.join', (['tmp_sub', "('%s.nii.gz' % subid)"], {}), "(tmp_sub, '%s.nii.gz' % subid)\n", (3745, 3775), True, 'import os.path as op\n'), ((3799, 3833), 'os.path.join', 'op.join', (['tmp_sub', "('%s.mgz' % subid)"], {}), "(tmp_sub, '%s.mgz' % subid)\n", (3806, 3833), True, 'import os.path as op\n'), ((3852, 3868), 'nibabel.load', 'nb.load', (['niifile'], {}), '(niifile)\n', (3859, 3868), True, 'import nibabel as nb\n'), ((4050, 4105), 'subprocess.call', 'sp.call', (["['mri_convert', modnii, ref_file]"], {'cwd': 'tmp_sub'}), "(['mri_convert', modnii, ref_file], cwd=tmp_sub)\n", (4057, 4105), True, 'import subprocess as sp\n'), ((4204, 4238), 'os.path.join', 'op.join', (['tmp_sub', "('%s.tcl' % subid)"], {}), "(tmp_sub, '%s.tcl' % subid)\n", (4211, 4238), True, 'import os.path as op\n'), ((4778, 4888), 'subprocess.call', 'sp.call', (["['tkmedit', subid, 'T1.mgz', 'lh.pial', '-aux-surface', 'rh.pial', '-tcl',\n tcl_file]"], {'env': 'environ'}), "(['tkmedit', subid, 'T1.mgz', 'lh.pial', '-aux-surface', 'rh.pial',\n '-tcl', tcl_file], env=environ)\n", (4785, 4888), True, 'import subprocess as sp\n'), ((4935, 5055), 'subprocess.call', 'sp.call', 
(["['convert', '-delay', '10', '-loop', '0', '%s/%s-*.tif' % (tmp_sub, subid),\n '%s/%s.gif' % (out_dir, subid)]"], {}), "(['convert', '-delay', '10', '-loop', '0', '%s/%s-*.tif' % (tmp_sub,\n subid), '%s/%s.gif' % (out_dir, subid)])\n", (4942, 5055), True, 'import subprocess as sp\n'), ((5158, 5195), 'os.path.join', 'op.join', (['tmp_sub', "('lh-%s.tcl' % subid)"], {}), "(tmp_sub, 'lh-%s.tcl' % subid)\n", (5165, 5195), True, 'import os.path as op\n'), ((5883, 5970), 'subprocess.call', 'sp.call', (["['tkmedit', subid, 'norm.mgz', 'lh.white', '-tcl', tcl_file]"], {'env': 'environ'}), "(['tkmedit', subid, 'norm.mgz', 'lh.white', '-tcl', tcl_file], env=\n environ)\n", (5890, 5970), True, 'import subprocess as sp\n'), ((6038, 6075), 'os.path.join', 'op.join', (['tmp_sub', "('rh-%s.tcl' % subid)"], {}), "(tmp_sub, 'rh-%s.tcl' % subid)\n", (6045, 6075), True, 'import os.path as op\n'), ((6767, 6854), 'subprocess.call', 'sp.call', (["['tkmedit', subid, 'norm.mgz', 'rh.white', '-tcl', tcl_file]"], {'env': 'environ'}), "(['tkmedit', subid, 'norm.mgz', 'rh.white', '-tcl', tcl_file], env=\n environ)\n", (6774, 6854), True, 'import subprocess as sp\n'), ((6901, 7028), 'subprocess.call', 'sp.call', (["['convert', '-delay', '10', '-loop', '0', '%s/%s-lh-*.tif' % (tmp_sub,\n subid), '%s/%s-lh.gif' % (out_dir, subid)]"], {}), "(['convert', '-delay', '10', '-loop', '0', '%s/%s-lh-*.tif' % (\n tmp_sub, subid), '%s/%s-lh.gif' % (out_dir, subid)])\n", (6908, 7028), True, 'import subprocess as sp\n'), ((7057, 7184), 'subprocess.call', 'sp.call', (["['convert', '-delay', '10', '-loop', '0', '%s/%s-rh-*.tif' % (tmp_sub,\n subid), '%s/%s-rh.gif' % (out_dir, subid)]"], {}), "(['convert', '-delay', '10', '-loop', '0', '%s/%s-rh-*.tif' % (\n tmp_sub, subid), '%s/%s-rh.gif' % (out_dir, subid)])\n", (7064, 7184), True, 'import subprocess as sp\n'), ((2112, 2136), 'os.listdir', 'os.listdir', (['subjects_dir'], {}), '(subjects_dir)\n', (2122, 2136), False, 'import os\n'), ((3340, 3376), 'os.path.join', 'op.join', (['sub_path', '"""mri"""', '"""norm.mgz"""'], {}), "(sub_path, 'mri', 'norm.mgz')\n", (3347, 3376), True, 'import os.path as op\n'), ((3432, 3448), 'nibabel.load', 'nb.load', (['niifile'], {}), '(niifile)\n', (3439, 3448), True, 'import nibabel as nb\n'), ((7267, 7282), 'shutil.rmtree', 'rmtree', (['tmp_sub'], {}), '(tmp_sub)\n', (7273, 7282), False, 'from shutil import rmtree\n'), ((2173, 2205), 'os.path.join', 'os.path.join', (['subjects_dir', 'name'], {}), '(subjects_dir, name)\n', (2185, 2205), False, 'import os\n'), ((2373, 2400), 'os.path.join', 'op.join', (['subjects_dir', 'name'], {}), '(subjects_dir, name)\n', (2380, 2400), True, 'import os.path as op\n'), ((2413, 2437), 'os.listdir', 'os.listdir', (['subjects_dir'], {}), '(subjects_dir)\n', (2423, 2437), False, 'import os\n'), ((2552, 2590), 'os.path.join', 'op.join', (['subjects_dir', 'opts.subject_id'], {}), '(subjects_dir, opts.subject_id)\n', (2559, 2590), True, 'import os.path as op\n'), ((2474, 2506), 'os.path.join', 'os.path.join', (['subjects_dir', 'name'], {}), '(subjects_dir, name)\n', (2486, 2506), False, 'import os\n')]
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from django.shortcuts import render
from django.db.models import Count
from django.db.models.functions import Trim, Lower
from django.urls import reverse_lazy
from .models import Blog
from .forms import EditBlogForm
def tag_count(blog_user, topn=0):
# TODO Move to model manager
raw_tags = (
Blog.blog.filter(user=blog_user)
.order_by("tag")
.values("tag")
.annotate(count=Count("tag"), tag_new=Trim(Lower("tag")))
)
count_tags = dict()
# TODO Split by tags with "," and those without
for record in raw_tags:
for tag in record["tag_new"].split(","):
k = tag.strip()
if len(k) > 0:
count_tags[k] = count_tags.get(k, 0) + record["count"]
# TODO Sort by value (desc) and then key (ascend) for common values
if topn == 0:
return {
k: count_tags[k]
for k in sorted(count_tags, key=count_tags.get, reverse=True)
}
else:
return {
k: count_tags[k]
for k in sorted(count_tags, key=count_tags.get, reverse=True)[:topn]
}
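# Illustrative example (not part of the app): if a user's blog entries carry
# the tag strings "python, django" and "python", tag_count(user) returns
# {"python": 2, "django": 1}; tag_count(user, topn=1) keeps only the most
# frequent tag, {"python": 2}.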
# Create your views here.
def BlogHome(request):
blog_all = Blog.blog.filter(user=request.user)
blogs = blog_all.order_by("-modified")[:3]
blog_count = blog_all.count()
tag_sorted = tag_count(request.user, topn=5)
return render(
request,
"blog/blog_home.html",
{"blogs": blogs, "tags": tag_sorted, "blog_count": blog_count},
)
class BlogListView(PermissionRequiredMixin, ListView):
model = Blog
paginate_by = 3
template_name = "blog/blog_list.html"
permission_required = "blog.view_blog"
def get_queryset(self):
return Blog.blog.filter(user=self.request.user)
def BlogAllTagsView(request):
# TODO turn into ListView with paginate
tag_sorted = tag_count(request.user)
return render(request, "blog/blog_tags.html", {"tags": tag_sorted})
class BlogTagListView(PermissionRequiredMixin, ListView):
model = Blog
paginate_by = 3
template_name = "blog/blog_list.html"
permission_required = "blog.view_blog"
def get_queryset(self):
return Blog.blog.filter(tag__contains=self.kwargs["tag_name"], user=self.request.user)
class BlogDetailView(PermissionRequiredMixin, DetailView):
model = Blog
template_name = "blog/blog_detail.html"
permission_required = "blog.view_blog"
class BlogCreateView(PermissionRequiredMixin, LoginRequiredMixin, CreateView):
form_class = EditBlogForm
model = Blog
action = "Add"
template_name = "blog/blog_form.html"
permission_required = "blog.add_blog"
class BlogUpdateView(PermissionRequiredMixin, LoginRequiredMixin, UpdateView):
form_class = EditBlogForm
model = Blog
action = "Edit"
template_name = "blog/blog_form.html"
permission_required = "blog.change_blog"
class BlogDeleteView(PermissionRequiredMixin, LoginRequiredMixin, DeleteView):
model = Blog
success_url = reverse_lazy("blog:list")
permission_required = "blog.delete_blog"
|
[
"django.shortcuts.render",
"django.db.models.Count",
"django.db.models.functions.Lower",
"django.urls.reverse_lazy"
] |
[((1590, 1696), 'django.shortcuts.render', 'render', (['request', '"""blog/blog_home.html"""', "{'blogs': blogs, 'tags': tag_sorted, 'blog_count': blog_count}"], {}), "(request, 'blog/blog_home.html', {'blogs': blogs, 'tags': tag_sorted,\n 'blog_count': blog_count})\n", (1596, 1696), False, 'from django.shortcuts import render\n'), ((2116, 2176), 'django.shortcuts.render', 'render', (['request', '"""blog/blog_tags.html"""', "{'tags': tag_sorted}"], {}), "(request, 'blog/blog_tags.html', {'tags': tag_sorted})\n", (2122, 2176), False, 'from django.shortcuts import render\n'), ((3230, 3255), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""blog:list"""'], {}), "('blog:list')\n", (3242, 3255), False, 'from django.urls import reverse_lazy\n'), ((651, 663), 'django.db.models.Count', 'Count', (['"""tag"""'], {}), "('tag')\n", (656, 663), False, 'from django.db.models import Count\n'), ((678, 690), 'django.db.models.functions.Lower', 'Lower', (['"""tag"""'], {}), "('tag')\n", (683, 690), False, 'from django.db.models.functions import Trim, Lower\n')]
|
import sys
from typing import Generator
from typing import List
from typing import Optional
import pytest
from _pytest.pytester import Pytester
def test_one_dir_pythonpath(pytester: Pytester, file_structure) -> None:
pytester.makefile(".ini", pytest="[pytest]\npythonpath=sub\n")
result = pytester.runpytest("test_foo.py")
assert result.ret == 0
result.assert_outcomes(passed=1)
def test_two_dirs_pythonpath(pytester: Pytester, file_structure) -> None:
pytester.makefile(".ini", pytest="[pytest]\npythonpath=sub sub2\n")
result = pytester.runpytest("test_foo.py", "test_bar.py")
assert result.ret == 0
result.assert_outcomes(passed=2)
def test_unconfigure_unadded_dir_pythonpath(pytester: Pytester) -> None:
pytester.makeconftest(
"""
def pytest_configure(config):
config.addinivalue_line("pythonpath", "sub")
"""
)
pytester.makepyfile(
"""
import sys
def test_something():
pass
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=1)
def test_clean_up_pythonpath(pytester: Pytester) -> None:
"""Test that the srcpaths plugin cleans up after itself."""
pytester.makefile(".ini", pytest="[pytest]\npythonpath=I_SHALL_BE_REMOVED\n")
pytester.makepyfile(test_foo="""def test_foo(): pass""")
before: Optional[List[str]] = None
after: Optional[List[str]] = None
class Plugin:
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_unconfigure(self) -> Generator[None, None, None]:
nonlocal before, after
before = sys.path.copy()
yield
after = sys.path.copy()
result = pytester.runpytest_inprocess(plugins=[Plugin()])
assert result.ret == 0
assert before is not None
assert after is not None
assert any("I_SHALL_BE_REMOVED" in entry for entry in before)
assert not any("I_SHALL_BE_REMOVED" in entry for entry in after)
|
[
"sys.path.copy",
"pytest.hookimpl"
] |
[((1463, 1511), 'pytest.hookimpl', 'pytest.hookimpl', ([], {'hookwrapper': '(True)', 'tryfirst': '(True)'}), '(hookwrapper=True, tryfirst=True)\n', (1478, 1511), False, 'import pytest\n'), ((1637, 1652), 'sys.path.copy', 'sys.path.copy', ([], {}), '()\n', (1650, 1652), False, 'import sys\n'), ((1691, 1706), 'sys.path.copy', 'sys.path.copy', ([], {}), '()\n', (1704, 1706), False, 'import sys\n')]
|
#!/usr/bin/env python2
import paho.mqtt.client as mqtt
import time
import Adafruit_DHT
from configparser import ConfigParser
import json
config = ConfigParser(delimiters=('=', ))
config.read('config.ini')
sensor_type = config['sensor'].get('type', 'dht22').lower()
if sensor_type == 'dht22':
sensor = Adafruit_DHT.DHT22
elif sensor_type == 'dht11':
    sensor = Adafruit_DHT.DHT11
elif sensor_type == 'am2302':
sensor = Adafruit_DHT.AM2302
else:
raise Exception('Supported sensor types: DHT22, DHT11, AM2302')
pin = config['sensor'].get('pin', 10)
topic = config['mqtt'].get('topic', 'temperature/dht22')
decim_digits = config['sensor'].getint('decimal_digits', 2)
sleep_time = config['sensor'].getint('interval', 60)
user = config['mqtt'].get('user', 'guest')
password = config['mqtt'].get('password', '<PASSWORD>')
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code {}".format(rc))
client = mqtt.Client()
client.on_connect = on_connect
client.username_pw_set(user, password)
client.connect(config['mqtt'].get('hostname', 'homeassistant'),
config['mqtt'].getint('port', 1883),
config['mqtt'].getint('timeout', 60))
client.loop_start()
while True:
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
data = {'temperature': round(temperature, decim_digits),
'humidity': round(humidity, decim_digits)}
client.publish(topic, json.dumps(data))
print('Published. Sleeping ...')
else:
print('Failed to get reading. Skipping ...')
time.sleep(sleep_time)
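# For reference, an illustrative config.ini covering every key read above; the
# hostname and credentials are placeholders, not values from this project:
#
#   [sensor]
#   type = dht22
#   pin = 10
#   decimal_digits = 2
#   interval = 60
#
#   [mqtt]
#   hostname = homeassistant
#   port = 1883
#   timeout = 60
#   user = guest
#   password = secret
#   topic = temperature/dht22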
|
[
"configparser.ConfigParser",
"paho.mqtt.client.Client",
"json.dumps",
"time.sleep",
"Adafruit_DHT.read_retry"
] |
[((148, 179), 'configparser.ConfigParser', 'ConfigParser', ([], {'delimiters': "('=',)"}), "(delimiters=('=',))\n", (160, 179), False, 'from configparser import ConfigParser\n'), ((1024, 1037), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (1035, 1037), True, 'import paho.mqtt.client as mqtt\n'), ((1339, 1375), 'Adafruit_DHT.read_retry', 'Adafruit_DHT.read_retry', (['sensor', 'pin'], {}), '(sensor, pin)\n', (1362, 1375), False, 'import Adafruit_DHT\n'), ((1717, 1739), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (1727, 1739), False, 'import time\n'), ((1589, 1605), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1599, 1605), False, 'import json\n')]
|
import unittest
import mplisp.evaluator as evaluator
class TestListMap(unittest.TestCase):
    def test_map(self):
input1 = """
(map (lambda (x) (* 2 x)) (list 1 2 3))
"""
output1 = list(evaluator.evaluate(input1))
self.assertEqual(output1[0], [2, 4, 6])
    def test_map_2(self):
input1 = """
(import "sys")
(def a (list 1 2 3 4))
(map (lambda (x) (* 2 x)) a)
"""
output1 = list(evaluator.evaluate(input1))
self.assertEqual(output1[2], [2, 4, 6, 8])
|
[
"mplisp.evaluator.evaluate"
] |
[((223, 249), 'mplisp.evaluator.evaluate', 'evaluator.evaluate', (['input1'], {}), '(input1)\n', (241, 249), True, 'import mplisp.evaluator as evaluator\n'), ((475, 501), 'mplisp.evaluator.evaluate', 'evaluator.evaluate', (['input1'], {}), '(input1)\n', (493, 501), True, 'import mplisp.evaluator as evaluator\n')]
|
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django_redis.cache import RedisCache as PlainRedisCache
from redis_lock import Lock
from redis_lock import reset_all
class RedisCache(PlainRedisCache):
@property
def __client(self):
try:
return self.client.get_client()
except Exception as exc:
raise NotImplementedError(
"RedisCache doesn't have a raw client: %r. "
"Use 'redis_cache.client.DefaultClient' as the CLIENT_CLASS !" % exc
)
def lock(self, key, expire=None, id=None):
return Lock(self.__client, key, expire=expire, id=id)
def locked_get_or_set(self, key, value_creator, version=None,
expire=None, id=None, lock_key=None,
timeout=DEFAULT_TIMEOUT):
"""
Fetch a given key from the cache. If the key does not exist, the key is added and
set to the value returned when calling `value_creator`. The creator function
is invoked inside of a lock.
"""
if lock_key is None:
lock_key = 'get_or_set:' + key
val = self.get(key, version=version)
if val is not None:
return val
with self.lock(lock_key, expire=expire, id=id):
# Was the value set while we were trying to acquire the lock?
val = self.get(key, version=version)
if val is not None:
return val
# Nope, create value now.
val = value_creator()
if val is None:
raise ValueError('`value_creator` must return a value')
self.set(key, val, timeout=timeout, version=version)
return val
def reset_all(self):
"""
        Forcibly deletes all locks if any remain (for example after a crash). Use this with care.
"""
reset_all(self.__client)
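# A minimal usage sketch for locked_get_or_set(), assuming this backend is
# configured as Django's default cache; build_report() is a hypothetical,
# expensive callable:
#
#   from django.core.cache import cache
#
#   report = cache.locked_get_or_set(
#       "report:daily",
#       value_creator=build_report,
#       timeout=300,   # how long the computed value stays cached
#       expire=60,     # how long the lock itself may be held
#   )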
|
[
"redis_lock.Lock",
"redis_lock.reset_all"
] |
[((611, 657), 'redis_lock.Lock', 'Lock', (['self.__client', 'key'], {'expire': 'expire', 'id': 'id'}), '(self.__client, key, expire=expire, id=id)\n', (615, 657), False, 'from redis_lock import Lock\n'), ((1898, 1922), 'redis_lock.reset_all', 'reset_all', (['self.__client'], {}), '(self.__client)\n', (1907, 1922), False, 'from redis_lock import reset_all\n')]
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("h4ValidData")
# initialize MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:./ECALH4TB_data_hits.root')
)
process.tbValidData = cms.EDAnalyzer("EcalTBValidation",
rootfile = cms.untracked.string('EcalTBValidationData.root'),
eventHeaderProducer = cms.string('ecalTBunpack'),
hitProducer = cms.string('ecal2006TBWeightUncalibRecHit'),
digiCollection = cms.string(''),
tdcRecInfoCollection = cms.string('EcalTBTDCRecInfo'),
data_ = cms.untracked.int32(0),
digiProducer = cms.string('ecalUnsuppressedDigis'),
xtalInBeam = cms.untracked.int32(1104),
hitCollection = cms.string('EcalUncalibRecHitsEB'),
hodoRecInfoProducer = cms.string('ecal2006TBHodoscopeReconstructor'),
eventHeaderCollection = cms.string(''),
hodoRecInfoCollection = cms.string('EcalTBHodoscopeRecInfo'),
tdcRecInfoProducer = cms.string('ecal2006TBTDCReconstructor')
)
process.p = cms.Path(process.tbValidData)
|
[
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.untracked.string",
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.Process",
"FWCore.ParameterSet.Config.untracked.vstring",
"FWCore.ParameterSet.Config.Path"
] |
[((52, 78), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""h4ValidData"""'], {}), "('h4ValidData')\n", (63, 78), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1151, 1180), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['process.tbValidData'], {}), '(process.tbValidData)\n', (1159, 1180), True, 'import FWCore.ParameterSet.Config as cms\n'), ((215, 238), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(-1)'], {}), '(-1)\n', (234, 238), True, 'import FWCore.ParameterSet.Config as cms\n'), ((299, 354), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""file:./ECALH4TB_data_hits.root"""'], {}), "('file:./ECALH4TB_data_hits.root')\n", (320, 354), True, 'import FWCore.ParameterSet.Config as cms\n'), ((430, 479), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""EcalTBValidationData.root"""'], {}), "('EcalTBValidationData.root')\n", (450, 479), True, 'import FWCore.ParameterSet.Config as cms\n'), ((507, 533), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""ecalTBunpack"""'], {}), "('ecalTBunpack')\n", (517, 533), True, 'import FWCore.ParameterSet.Config as cms\n'), ((553, 596), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""ecal2006TBWeightUncalibRecHit"""'], {}), "('ecal2006TBWeightUncalibRecHit')\n", (563, 596), True, 'import FWCore.ParameterSet.Config as cms\n'), ((619, 633), 'FWCore.ParameterSet.Config.string', 'cms.string', (['""""""'], {}), "('')\n", (629, 633), True, 'import FWCore.ParameterSet.Config as cms\n'), ((662, 692), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""EcalTBTDCRecInfo"""'], {}), "('EcalTBTDCRecInfo')\n", (672, 692), True, 'import FWCore.ParameterSet.Config as cms\n'), ((706, 728), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(0)'], {}), '(0)\n', (725, 728), True, 'import FWCore.ParameterSet.Config as cms\n'), ((749, 784), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""ecalUnsuppressedDigis"""'], {}), "('ecalUnsuppressedDigis')\n", (759, 784), True, 'import FWCore.ParameterSet.Config as cms\n'), ((803, 828), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(1104)'], {}), '(1104)\n', (822, 828), True, 'import FWCore.ParameterSet.Config as cms\n'), ((850, 884), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""EcalUncalibRecHitsEB"""'], {}), "('EcalUncalibRecHitsEB')\n", (860, 884), True, 'import FWCore.ParameterSet.Config as cms\n'), ((912, 958), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""ecal2006TBHodoscopeReconstructor"""'], {}), "('ecal2006TBHodoscopeReconstructor')\n", (922, 958), True, 'import FWCore.ParameterSet.Config as cms\n'), ((988, 1002), 'FWCore.ParameterSet.Config.string', 'cms.string', (['""""""'], {}), "('')\n", (998, 1002), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1032, 1068), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""EcalTBHodoscopeRecInfo"""'], {}), "('EcalTBHodoscopeRecInfo')\n", (1042, 1068), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1095, 1135), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""ecal2006TBTDCReconstructor"""'], {}), "('ecal2006TBTDCReconstructor')\n", (1105, 1135), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
import asyncio
from query_graphql import query_artifact_domains, query_weapon_materials_book
class Domains:
leylines = {
"Blossom of Revelation": "Character EXP Materials",
"Blossom of Wealth": "Mora"
}
weapon_domains = {}
talent_domains = {}
artifact_domains = {}
trounce_domains = {
"Wolf of the North Challenge": "Andrius (Lupus Boreas), Dominator of Wolves",
"Beneath the Dragon-Queller": "Azhdaha, Sealed Lord of Vishaps",
"Enter the Golden House": "Childe, Eleventh of the Fatui Harbingers",
"Narukami Island: Tenshukaku": "La Signora (Rosalyne-Kruzchka Lohefalter), The Fair Lady",
"End of the Oneiric Euthymia": "<NAME> no Mikoto, Raiden no Inazuma Tono"
}
world_bosses = {
"Anemo Hypostasis": None,
"Electro Hypostasis": None,
"Cryo Regisvine": None,
"Cryo Hypostasis": None,
"Oceanid": None,
"Pyro Regisvine": None,
"Geo Hypostasis": None,
"Primo Geovishap": None,
"Maguu Kenki": None,
"Pyro Hypostasis": None,
"Perpetual Mechanical Array": None,
"Hydro Hypostasis": None,
"Thunder Manifestation": None,
"Golden Wolflord": None,
"Bathysmal Vishap Herd": None,
"Ruin Serpent": None,
}
@staticmethod
async def initialize():
Domains.artifact_domains = await query_artifact_domains()
tuple = await query_weapon_materials_book()
Domains.weapon_domains = tuple[0]
Domains.talent_domains = tuple[1]
Domains.domains = {
"Ley Line Outcrops": Domains.leylines,
"Weapon Ascension Materials": Domains.weapon_domains,
"Talent Books": Domains.talent_domains,
"Artifacts": Domains.artifact_domains,
"Trounce Domains": Domains.trounce_domains,
"World Bosses": Domains.world_bosses
}
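# A minimal sketch of how the one-off initialisation might be driven from
# synchronous start-up code (asyncio is already imported above for this purpose);
# the __main__ guard is illustrative, not part of the original module:
#
#   if __name__ == "__main__":
#       asyncio.run(Domains.initialize())
#       print(list(Domains.domains))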
|
[
"query_graphql.query_artifact_domains",
"query_graphql.query_weapon_materials_book"
] |
[((1407, 1431), 'query_graphql.query_artifact_domains', 'query_artifact_domains', ([], {}), '()\n', (1429, 1431), False, 'from query_graphql import query_artifact_domains, query_weapon_materials_book\n'), ((1454, 1483), 'query_graphql.query_weapon_materials_book', 'query_weapon_materials_book', ([], {}), '()\n', (1481, 1483), False, 'from query_graphql import query_artifact_domains, query_weapon_materials_book\n')]
|
# __author__ = 'Dave'
import cv2
from skimage import io
from skimage.transform import probabilistic_hough_line
import matplotlib.pyplot as plt
import os
import warnings
import random
import numpy as np
warnings.filterwarnings('ignore', category=RuntimeWarning)
class CorrectImage(object):
def __init__(self):
self.path = ""
self.name = ""
self.image = None
self.edges = None
self.lines = None
def _load_image(self, image):
"""
:param image: image file name (str)
:return: skimage image data
"""
filename = os.path.join(self.path, image)
return io.imread(filename)
def add_path(self, image_path):
"""
        Sets the directory that image files are loaded from
:param image_path: (string)
"""
self.path = image_path + '/'
def add_image(self, filename):
"""
Adds image to the list of images
:param filename: (string)
"""
self.name = filename
self.hough_transform()
def _detect_edges(self, image, vary=False, plot=False):
"""
:param image: image file name (str)
:param vary: turn tunable plotting on
:param plot: turn plotting on
:return: detected edges with variable filters
"""
self.image = self._load_image(image)
if vary:
def nothing(x):
pass
cv2.namedWindow('image')
cv2.createTrackbar('th1', 'image', 0, 255, nothing)
cv2.createTrackbar('th2', 'image', 0, 255, nothing)
while True:
th1 = cv2.getTrackbarPos('th1', 'image')
th2 = cv2.getTrackbarPos('th2', 'image')
edges = cv2.Canny(self.image, th1, th2)
cv2.imshow('image', edges)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
edges = cv2.Canny(self.image, 255, 255)
if plot:
cv2.namedWindow('image')
cv2.imshow('image', edges)
cv2.waitKey(5000)
cv2.destroyAllWindows()
return edges
def hough_transform(self, vary=False, plot=False):
"""
:param vary: turn edge detection tunable plotting on
:param plot: turn plotting on
:return: numpy array of probabilistically found straight lines
"""
if self.name == "":
raise ValueError('Missing image: you need to specify the image file using add_image.')
self.edges = self._detect_edges(self.name, vary=vary, plot=plot)
self.lines = probabilistic_hough_line(self.edges, threshold=10, line_length=5, line_gap=3)
if plot:
for line in self.lines:
p0, p1 = line
plt.plot((p0[0], p1[0]), (p0[1], p1[1]))
plt.show()
@staticmethod
def slope(lines):
"""
        :param lines: array of coordinates (i.e. [((x0, y0), (xf, yf)), ...])
:return: array of slope values with the same number of entries as lines
"""
# for doing vectorized subtraction across all line pairs,
# we need the first line of each pair to be the negative of itself
sign_op = np.ones_like(lines)
sign_op[:, :, 0] *= -1
# get the differences between x and y coordinates (start, end), respectively
slopes = np.sum(sign_op * lines, axis=2)
# compute the slopes of each line for every line pair
slopes = slopes[:, :, 0] / slopes[:, :, 1]
# turn infinite values to a finite, but very large value
slopes[np.isinf(slopes)] = 1e6
# this catches cases when the line - as defined - is actually a point and the slope doesn't exist
slopes[np.isnan(slopes)] = 0
return slopes
def line_pair(self, num_pairs):
"""
:param num_pairs: number of line pairs to take (int)
:return: line pairs (array)
"""
idx = np.random.randint(len(self.lines), size=num_pairs * 2)
lines = np.array(self.lines)[idx]
return lines.reshape(num_pairs, 2, 2, 2)
@staticmethod
def mutation(pairs, p_mutate=0.01):
"""
:param pairs: (numpy array with dimensions (n_pairs, 2, 2, 2)) pairs of lines
:param p_mutate: (float) probability of a mutation
:return: (numpy array with dimensions (n_pairs, 2, 2, 2)) pairs of lines with mutations
"""
for i in range(len(pairs)):
if p_mutate > random.random():
# column = np.random.randint(low=0, high=2)
for column in [0, 1]:
t = pairs[i, :, :, column]
low, high = np.min(t), np.max(t)
if high == low:
high *= 2
pairs[i, :, :, column] = np.random.randint(low=low, high=high, size=t.shape)
return pairs
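# A minimal usage sketch, assuming an "images" directory containing "wall.jpg"
# (both names are placeholders):
#
#   corrector = CorrectImage()
#   corrector.add_path("images")       # directory the image is loaded from
#   corrector.add_image("wall.jpg")    # loads, edge-detects and runs the Hough transform
#   pairs = corrector.line_pair(num_pairs=10)              # (10, 2, 2, 2) line pairs
#   slopes = CorrectImage.slope(pairs)                     # (10, 2) slope values
#   mutated = CorrectImage.mutation(pairs, p_mutate=0.05)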
|
[
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.min",
"skimage.transform.probabilistic_hough_line",
"numpy.isinf",
"cv2.waitKey",
"skimage.io.imread",
"numpy.isnan",
"cv2.Canny",
"cv2.createTrackbar",
"cv2.namedWindow",
"warnings.filterwarnings",
"matplotlib.pyplot.show",
"numpy.ones_like",
"os.path.join",
"numpy.sum",
"numpy.random.randint",
"random.random",
"cv2.getTrackbarPos"
] |
[((205, 263), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (228, 263), False, 'import warnings\n'), ((601, 631), 'os.path.join', 'os.path.join', (['self.path', 'image'], {}), '(self.path, image)\n', (613, 631), False, 'import os\n'), ((647, 666), 'skimage.io.imread', 'io.imread', (['filename'], {}), '(filename)\n', (656, 666), False, 'from skimage import io\n'), ((1968, 1999), 'cv2.Canny', 'cv2.Canny', (['self.image', '(255)', '(255)'], {}), '(self.image, 255, 255)\n', (1977, 1999), False, 'import cv2\n'), ((2652, 2729), 'skimage.transform.probabilistic_hough_line', 'probabilistic_hough_line', (['self.edges'], {'threshold': '(10)', 'line_length': '(5)', 'line_gap': '(3)'}), '(self.edges, threshold=10, line_length=5, line_gap=3)\n', (2676, 2729), False, 'from skimage.transform import probabilistic_hough_line\n'), ((3274, 3293), 'numpy.ones_like', 'np.ones_like', (['lines'], {}), '(lines)\n', (3286, 3293), True, 'import numpy as np\n'), ((3428, 3459), 'numpy.sum', 'np.sum', (['(sign_op * lines)'], {'axis': '(2)'}), '(sign_op * lines, axis=2)\n', (3434, 3459), True, 'import numpy as np\n'), ((1428, 1452), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (1443, 1452), False, 'import cv2\n'), ((1465, 1516), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""th1"""', '"""image"""', '(0)', '(255)', 'nothing'], {}), "('th1', 'image', 0, 255, nothing)\n", (1483, 1516), False, 'import cv2\n'), ((1529, 1580), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""th2"""', '"""image"""', '(0)', '(255)', 'nothing'], {}), "('th2', 'image', 0, 255, nothing)\n", (1547, 1580), False, 'import cv2\n'), ((1928, 1951), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1949, 1951), False, 'import cv2\n'), ((2029, 2053), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (2044, 2053), False, 'import cv2\n'), ((2066, 2092), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'edges'], {}), "('image', edges)\n", (2076, 2092), False, 'import cv2\n'), ((2105, 2122), 'cv2.waitKey', 'cv2.waitKey', (['(5000)'], {}), '(5000)\n', (2116, 2122), False, 'import cv2\n'), ((2135, 2158), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2156, 2158), False, 'import cv2\n'), ((2882, 2892), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2890, 2892), True, 'import matplotlib.pyplot as plt\n'), ((3655, 3671), 'numpy.isinf', 'np.isinf', (['slopes'], {}), '(slopes)\n', (3663, 3671), True, 'import numpy as np\n'), ((3801, 3817), 'numpy.isnan', 'np.isnan', (['slopes'], {}), '(slopes)\n', (3809, 3817), True, 'import numpy as np\n'), ((4090, 4110), 'numpy.array', 'np.array', (['self.lines'], {}), '(self.lines)\n', (4098, 4110), True, 'import numpy as np\n'), ((1628, 1662), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""th1"""', '"""image"""'], {}), "('th1', 'image')\n", (1646, 1662), False, 'import cv2\n'), ((1685, 1719), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""th2"""', '"""image"""'], {}), "('th2', 'image')\n", (1703, 1719), False, 'import cv2\n'), ((1744, 1775), 'cv2.Canny', 'cv2.Canny', (['self.image', 'th1', 'th2'], {}), '(self.image, th1, th2)\n', (1753, 1775), False, 'import cv2\n'), ((1792, 1818), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'edges'], {}), "('image', edges)\n", (1802, 1818), False, 'import cv2\n'), ((2829, 2869), 'matplotlib.pyplot.plot', 'plt.plot', (['(p0[0], p1[0])', '(p0[1], p1[1])'], {}), '((p0[0], p1[0]), 
(p0[1], p1[1]))\n', (2837, 2869), True, 'import matplotlib.pyplot as plt\n'), ((4552, 4567), 'random.random', 'random.random', ([], {}), '()\n', (4565, 4567), False, 'import random\n'), ((1839, 1853), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1850, 1853), False, 'import cv2\n'), ((4882, 4933), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'low', 'high': 'high', 'size': 't.shape'}), '(low=low, high=high, size=t.shape)\n', (4899, 4933), True, 'import numpy as np\n'), ((4746, 4755), 'numpy.min', 'np.min', (['t'], {}), '(t)\n', (4752, 4755), True, 'import numpy as np\n'), ((4757, 4766), 'numpy.max', 'np.max', (['t'], {}), '(t)\n', (4763, 4766), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2021/9/18 11:19 PM
# @Author : DaiPuWei
# @Email : <EMAIL>
# @File : loss.py
# @Software: PyCharm
"""
这是YOLO模型的损失函数的定义脚本,目前目标分类损失支持smooth Label;
目标定位损失支持均方差损失、GIOU Loss、DIOU Loss和CIOU Loss;
"""
import math
import tensorflow as tf
from tensorflow.keras import backend as K
# ---------------------------------------------------#
#   Label smoothing
# ---------------------------------------------------#
def _smooth_labels(y_true, label_smoothing):
num_classes = tf.cast(K.shape(y_true)[-1], dtype=K.floatx())
label_smoothing = K.constant(label_smoothing, dtype=K.floatx())
return y_true * (1.0 - label_smoothing) + label_smoothing / num_classes
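# Worked example: with label_smoothing=0.1 and 80 classes, a one-hot target of 1
# becomes 1*(1 - 0.1) + 0.1/80 = 0.90125 and a target of 0 becomes 0.1/80 = 0.00125.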
# ---------------------------------------------------#
#   Decode the raw predictions of each feature level into real values
# ---------------------------------------------------#
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
num_anchors = len(anchors)
# ---------------------------------------------------#
# [1, 1, 1, num_anchors, 2]
# ---------------------------------------------------#
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
# ---------------------------------------------------#
    #   Build the x, y grid
# (13, 13, 1, 2)
# ---------------------------------------------------#
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
# ---------------------------------------------------#
    #   Reshape the predictions to (batch_size, 13, 13, 3, 85)
    #   85 splits into 4 + 1 + 80:
    #   4 are the centre / width-height adjustments of the box
    #   1 is the objectness confidence of the box
    #   80 are the class confidences
# ---------------------------------------------------#
feats = K.reshape(feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
# ---------------------------------------------------#
    #   Decode the predictions into real values
    #   box_xy is the centre of the box
    #   box_wh is the width and height of the box
# ---------------------------------------------------#
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[..., ::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[..., ::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
# ---------------------------------------------------------------------#
    #   When computing the loss, return grid, feats, box_xy, box_wh
    #   When predicting, return box_xy, box_wh, box_confidence, box_class_probs
# ---------------------------------------------------------------------#
if calc_loss == True:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
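# Decoding example: a raw t_x of 0.0 in grid column 7 of a 13-wide grid gives
# sigmoid(0.0) = 0.5, so the decoded centre is (0.5 + 7) / 13, about 0.58 of the
# image width; box_wh likewise scales the matching anchor by exp(t_w) and divides
# by the input width, so all decoded coordinates are fractions of the input size.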
# ---------------------------------------------------#
#   Compute the IoU between each predicted box and the ground-truth boxes
# ---------------------------------------------------#
def box_iou(b_true, b_pred):
# 13,13,3,1,4
    # Compute the top-left and bottom-right corner coordinates
b_true = K.expand_dims(b_true, -2)
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh / 2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
# 1,n,4
    # Compute the top-left and bottom-right corner coordinates
b_pred = K.expand_dims(b_pred, 0)
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh / 2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
    # Compute the overlap area
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
iou = intersect_area / (b_true_area + b_pred_area - intersect_area)
return iou
def box_giou(b_true, b_pred):
"""
Calculate GIoU loss on anchor boxes
Reference Paper:
"Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression"
https://arxiv.org/abs/1902.09630
Parameters
----------
b_true: GT boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
b_pred: predict boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
Returns
-------
giou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
"""
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh / 2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh / 2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
union_area = b_true_area + b_pred_area - intersect_area
# calculate IoU, add epsilon in denominator to avoid dividing by 0
iou = intersect_area / (union_area + K.epsilon())
# get enclosed area
enclose_mins = K.minimum(b_true_mins, b_pred_mins)
enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)
enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
# calculate GIoU, add epsilon in denominator to avoid dividing by 0
giou = iou - 1.0 * (enclose_area - union_area) / (enclose_area + K.epsilon())
giou = K.expand_dims(giou, -1)
return giou
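# Worked example: for a 1x1 ground-truth box centred at (0.5, 0.5) and a 1x1
# prediction centred at (1.0, 1.0), the intersection is 0.25 and the union 1.75,
# so IoU = 1/7 (about 0.143); the enclosing box spans (0, 0) to (1.5, 1.5) with
# area 2.25, giving GIoU = 0.143 - (2.25 - 1.75) / 2.25, about -0.079.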
def box_diou(b_true, b_pred,use_ciou_loss=False):
"""
    Parameters
    ----------
    b_true: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
    b_pred: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
    Returns
    -------
    diou/ciou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
"""
    # Top-left and bottom-right corners of the predicted box
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh / 2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
    # Top-left and bottom-right corners of the ground-truth box
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh / 2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
    # Compute the IoU between the ground-truth and predicted boxes
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
union_area = b1_area + b_pred_area - intersect_area
iou = intersect_area / K.maximum(union_area, K.epsilon())
    # Squared distance between the box centres
center_distance = K.sum(K.square(b_true_xy - b_pred_xy), axis=-1)
    # Top-left and bottom-right corners of the smallest box enclosing both boxes
enclose_mins = K.minimum(b_true_mins, b_pred_mins)
enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)
enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)
    # Squared diagonal length of the enclosing box
enclose_diagonal = K.sum(K.square(enclose_wh), axis=-1)
diou = iou - 1.0 * (center_distance) / K.maximum(enclose_diagonal, K.epsilon())
if use_ciou_loss:
v = 4 * K.square(tf.math.atan2(b_true_wh[..., 0], K.maximum(b_true_wh[..., 1], K.epsilon()))
- tf.math.atan2(b_pred_wh[..., 0],K.maximum(b_pred_wh[..., 1],K.epsilon()))) / (math.pi * math.pi)
# a trick: here we add an non-gradient coefficient w^2+h^2 to v to customize it's back-propagate,
# to match related description for equation (12) in original paper
#
#
# v'/w' = (8/pi^2) * (arctan(wgt/hgt) - arctan(w/h)) * (h/(w^2+h^2)) (12)
# v'/h' = -(8/pi^2) * (arctan(wgt/hgt) - arctan(w/h)) * (w/(w^2+h^2))
#
# The dominator w^2+h^2 is usually a small value for the cases
# h and w ranging in [0; 1], which is likely to yield gradient
# explosion. And thus in our implementation, the dominator
# w^2+h^2 is simply removed for stable convergence, by which
# the step size 1/(w^2+h^2) is replaced by 1 and the gradient direction
# is still consistent with Eqn. (12).
v = v * tf.stop_gradient(b_pred_wh[..., 0] * b_pred_wh[..., 0] + b_pred_wh[..., 1] * b_pred_wh[..., 1])
alpha = v / K.maximum((1.0 - iou + v), K.epsilon())
diou = diou - alpha * v
diou = K.expand_dims(diou, -1)
diou = tf.where(tf.math.is_nan(diou), tf.zeros_like(diou), diou)
return diou
# ---------------------------------------------------#
#   Loss computation
# ---------------------------------------------------#
def yolo_loss(args, anchors,num_classes,ignore_threshold=.5,label_smoothing=0.1,
use_giou_loss=False,use_diou_loss=False,use_ciou_loss=False,normalize=True,model_name='yolov3'):
    # Initialise the anchor masks and the number of output layers for each YOLO variant
if model_name == "yolov3": # yolov3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov3-spp': # yolov3-spp
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov4': # yolov4
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov4-csp': # yolov4-csp
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov4-p5': # yolov4-p5
anchor_mask = [[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 3
elif model_name == 'yolov4-p6': # yolov4-p6
anchor_mask = [[12, 13, 14, 15], [8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 4
elif model_name == 'yolov4-p7': # yolov4-p7
anchor_mask = [[16, 17, 18, 19], [12, 13, 14, 15], [8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 5
elif model_name == 'yolov3-tiny': # yolov3-tiny
anchor_mask = [[3, 4, 5], [0, 1, 2]]
num_layers = 2
elif model_name == 'yolov4-tiny': # yolov4-tiny
anchor_mask = [[3, 4, 5], [0, 1, 2]]
num_layers = 2
    else: # default to yolov3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
    # Split the predictions from the ground truth; args is [*model_body.output, *y_true]
y_true = args[num_layers:]
yolo_outputs = args[:num_layers]
    # Initialise the input shape and grid shapes for each YOLO variant
if model_name == "yolov3": # yolov3
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov3-spp': # yolov3-spp
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4': # yolov4
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-csp': # yolov4-csp
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-p5': # yolov4-p5
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-p6': # yolov4-p6
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*64, K.dtype(y_true[0]))
elif model_name == 'yolov4-p7': # yolov4-p7
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*128, K.dtype(y_true[0]))
elif model_name == 'yolov3-tiny': # yolov3-tiny
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-tiny': # yolov4-tiny
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
    else: # default to yolov3
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[l])) for l in range(num_layers)]
loss = 0
num_pos = 0
m = K.shape(yolo_outputs[0])[0]
mf = K.cast(m, K.dtype(yolo_outputs[0]))
for l in range(num_layers):
# -----------------------------------------------------------#
        # Take the first feature level (m,13,13,3,85) as an example
        # Extract the positions on this level that contain an object: (m,13,13,3,1)
# -----------------------------------------------------------#
object_mask = y_true[l][..., 4:5]
true_class_probs = y_true[l][..., 5:]
        if label_smoothing: # apply label smoothing
true_class_probs = _smooth_labels(true_class_probs, label_smoothing)
# -----------------------------------------------------------#
        # Process the output of this feature level of yolo_outputs and obtain four values:
        # grid: the grid coordinates
        # raw_pred: the raw, not yet decoded predictions
        # pred_xy: the decoded centre coordinates
        # pred_wh: the decoded width and height
# -----------------------------------------------------------#
grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)
        # pred_box is the decoded position of the predicted box
pred_box = K.concatenate([pred_xy, pred_wh])
# -----------------------------------------------------------#
        # Find the negative samples; the first step is to create a dynamic array
# -----------------------------------------------------------#
ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
object_mask_bool = K.cast(object_mask, 'bool')
        # Compute the ignore_mask for each image
def loop_body(b, ignore_mask):
            # Extract the n ground-truth boxes: (n, 4)
true_box = tf.boolean_mask(y_true[l][b, ..., 0:4], object_mask_bool[b, ..., 0])
# -----------------------------------------------------------#
            # Compute the IoU between the predicted and the ground-truth boxes
            # pred_box holds the predicted box coordinates
            # true_box holds the ground-truth box coordinates
            # iou is the IoU between predictions and ground truth
# -----------------------------------------------------------#
iou = box_iou(pred_box[b], true_box)
            # best_iou is the largest overlap of each feature point with any ground-truth box
best_iou = K.max(iou, axis=-1)
# -----------------------------------------------------------#
            # If the largest IoU between a prediction and the ground-truth boxes is
            # below ignore_thresh, the prediction is treated as having no matching
            # ground-truth box. The purpose is to ignore feature points whose
            # predictions already match a ground-truth box closely: those boxes are
            # already fairly accurate and should not be used as negative samples.
# -----------------------------------------------------------#
ignore_mask = ignore_mask.write(b, K.cast(best_iou < ignore_threshold, K.dtype(true_box)))
return b + 1, ignore_mask
        # Loop over every image in the batch
_, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body, [0, ignore_mask])
        # ignore_mask selects the feature points used as negative samples
ignore_mask = ignore_mask.stack()
ignore_mask = K.expand_dims(ignore_mask, -1)
        # The larger the ground-truth box, the smaller its weight; small boxes get a larger weight.
box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]
# ------------------------------------------------------------------------------#
        # If the position contains a box, compute the cross-entropy between 1 and the confidence.
        # If the position contains no box, compute the cross-entropy between 0 and the confidence.
        # Some samples are ignored in the process: those satisfying best_iou < ignore_thresh.
        # The purpose is to ignore feature points whose predictions already match a
        # ground-truth box closely: those boxes are already fairly accurate and
        # should not be used as negative samples.
# ------------------------------------------------------------------------------#
confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[..., 4:5], from_logits=True) + \
(1 - object_mask) * K.binary_crossentropy(object_mask, raw_pred[..., 4:5],
from_logits=True) * ignore_mask
class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[..., 5:], from_logits=True)
        # Choose the localisation loss according to the flags
        if use_giou_loss: # GIoU loss
raw_true_box = y_true[l][..., 0:4]
giou = box_giou(raw_true_box, pred_box)
giou_loss = object_mask * box_loss_scale * (1 - giou)
giou_loss = K.sum(giou_loss)
location_loss = giou_loss
        elif use_diou_loss: # DIoU loss
raw_true_box = y_true[l][..., 0:4]
diou = box_diou(pred_box, raw_true_box, use_ciou_loss=False)
diou_loss = object_mask * box_loss_scale * (1 - diou)
location_loss = diou_loss
        elif use_ciou_loss: # CIoU loss
raw_true_box = y_true[l][..., 0:4]
ciou = box_diou(pred_box, raw_true_box,use_ciou_loss=True)
ciou_loss = object_mask * box_loss_scale * (1 - ciou)
location_loss = ciou_loss
        else: # standard YOLOv3 box regression loss
# Standard YOLOv3 location loss
# K.binary_crossentropy is helpful to avoid exp overflow.
raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid
raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf
box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]
xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[..., 0:2],
from_logits=True)
wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh - raw_pred[..., 2:4])
xy_loss = K.sum(xy_loss)
wh_loss = K.sum(wh_loss)
location_loss = xy_loss + wh_loss
location_loss = K.sum(location_loss)
confidence_loss = K.sum(confidence_loss)
class_loss = K.sum(class_loss)
        # Count the number of positive samples
num_pos += tf.maximum(K.sum(K.cast(object_mask, tf.float32)), 1)
loss += location_loss + confidence_loss + class_loss
loss = K.expand_dims(loss, axis=-1)
    # Compute the final YOLO loss
if normalize:
loss = loss / num_pos
else:
loss = loss / mf
return loss
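# A minimal sketch of how yolo_loss is typically attached to a model with a
# Lambda layer so that Keras optimises a single scalar. The names model_body and
# y_true_inputs are placeholders for the detector and its ground-truth input
# tensors; anchors and num_classes come from the dataset configuration:
#
#   from tensorflow.keras.layers import Lambda
#   from tensorflow.keras.models import Model
#
#   loss_tensor = Lambda(
#       yolo_loss, name="yolo_loss",
#       arguments={"anchors": anchors, "num_classes": num_classes,
#                  "ignore_threshold": 0.5, "model_name": "yolov4"},
#   )([*model_body.output, *y_true_inputs])
#   train_model = Model([model_body.input, *y_true_inputs], loss_tensor)
#   train_model.compile(optimizer="adam", loss=lambda y_true, y_pred: y_pred)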
|
[
"tensorflow.keras.backend.log",
"tensorflow.keras.backend.floatx",
"tensorflow.keras.backend.epsilon",
"tensorflow.boolean_mask",
"tensorflow.keras.backend.dtype",
"tensorflow.keras.backend.sigmoid",
"tensorflow.while_loop",
"tensorflow.keras.backend.expand_dims",
"tensorflow.keras.backend.arange",
"tensorflow.keras.backend.shape",
"tensorflow.keras.backend.maximum",
"tensorflow.keras.backend.max",
"tensorflow.keras.backend.cast",
"tensorflow.zeros_like",
"tensorflow.keras.backend.binary_crossentropy",
"tensorflow.keras.backend.square",
"tensorflow.keras.backend.zeros_like",
"tensorflow.keras.backend.minimum",
"tensorflow.keras.backend.reshape",
"tensorflow.keras.backend.concatenate",
"tensorflow.keras.backend.exp",
"tensorflow.keras.backend.sum",
"tensorflow.keras.backend.constant",
"tensorflow.stop_gradient",
"tensorflow.math.is_nan"
] |
[((1639, 1670), 'tensorflow.keras.backend.concatenate', 'K.concatenate', (['[grid_x, grid_y]'], {}), '([grid_x, grid_y])\n', (1652, 1670), True, 'from tensorflow.keras import backend as K\n'), ((1970, 2057), 'tensorflow.keras.backend.reshape', 'K.reshape', (['feats', '[-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5]'], {}), '(feats, [-1, grid_shape[0], grid_shape[1], num_anchors, \n num_classes + 5])\n', (1979, 2057), True, 'from tensorflow.keras import backend as K\n'), ((2453, 2479), 'tensorflow.keras.backend.sigmoid', 'K.sigmoid', (['feats[..., 4:5]'], {}), '(feats[..., 4:5])\n', (2462, 2479), True, 'from tensorflow.keras import backend as K\n'), ((2502, 2527), 'tensorflow.keras.backend.sigmoid', 'K.sigmoid', (['feats[..., 5:]'], {}), '(feats[..., 5:])\n', (2511, 2527), True, 'from tensorflow.keras import backend as K\n'), ((3139, 3164), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['b_true', '(-2)'], {}), '(b_true, -2)\n', (3152, 3164), True, 'from tensorflow.keras import backend as K\n'), ((3402, 3426), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['b_pred', '(0)'], {}), '(b_pred, 0)\n', (3415, 3426), True, 'from tensorflow.keras import backend as K\n'), ((3654, 3689), 'tensorflow.keras.backend.maximum', 'K.maximum', (['b_true_mins', 'b_pred_mins'], {}), '(b_true_mins, b_pred_mins)\n', (3663, 3689), True, 'from tensorflow.keras import backend as K\n'), ((3712, 3749), 'tensorflow.keras.backend.minimum', 'K.minimum', (['b_true_maxes', 'b_pred_maxes'], {}), '(b_true_maxes, b_pred_maxes)\n', (3721, 3749), True, 'from tensorflow.keras import backend as K\n'), ((3769, 3817), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(intersect_maxes - intersect_mins)', '(0.0)'], {}), '(intersect_maxes - intersect_mins, 0.0)\n', (3778, 3817), True, 'from tensorflow.keras import backend as K\n'), ((5015, 5050), 'tensorflow.keras.backend.maximum', 'K.maximum', (['b_true_mins', 'b_pred_mins'], {}), '(b_true_mins, b_pred_mins)\n', (5024, 5050), True, 'from tensorflow.keras import backend as K\n'), ((5073, 5110), 'tensorflow.keras.backend.minimum', 'K.minimum', (['b_true_maxes', 'b_pred_maxes'], {}), '(b_true_maxes, b_pred_maxes)\n', (5082, 5110), True, 'from tensorflow.keras import backend as K\n'), ((5130, 5178), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(intersect_maxes - intersect_mins)', '(0.0)'], {}), '(intersect_maxes - intersect_mins, 0.0)\n', (5139, 5178), True, 'from tensorflow.keras import backend as K\n'), ((5584, 5619), 'tensorflow.keras.backend.minimum', 'K.minimum', (['b_true_mins', 'b_pred_mins'], {}), '(b_true_mins, b_pred_mins)\n', (5593, 5619), True, 'from tensorflow.keras import backend as K\n'), ((5640, 5677), 'tensorflow.keras.backend.maximum', 'K.maximum', (['b_true_maxes', 'b_pred_maxes'], {}), '(b_true_maxes, b_pred_maxes)\n', (5649, 5677), True, 'from tensorflow.keras import backend as K\n'), ((5695, 5739), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(enclose_maxes - enclose_mins)', '(0.0)'], {}), '(enclose_maxes - enclose_mins, 0.0)\n', (5704, 5739), True, 'from tensorflow.keras import backend as K\n'), ((5964, 5987), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['giou', '(-1)'], {}), '(giou, -1)\n', (5977, 5987), True, 'from tensorflow.keras import backend as K\n'), ((6777, 6812), 'tensorflow.keras.backend.maximum', 'K.maximum', (['b_true_mins', 'b_pred_mins'], {}), '(b_true_mins, b_pred_mins)\n', (6786, 6812), True, 'from tensorflow.keras import backend as K\n'), ((6835, 6872), 
'tensorflow.keras.backend.minimum', 'K.minimum', (['b_true_maxes', 'b_pred_maxes'], {}), '(b_true_maxes, b_pred_maxes)\n', (6844, 6872), True, 'from tensorflow.keras import backend as K\n'), ((6892, 6940), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(intersect_maxes - intersect_mins)', '(0.0)'], {}), '(intersect_maxes - intersect_mins, 0.0)\n', (6901, 6940), True, 'from tensorflow.keras import backend as K\n'), ((7361, 7396), 'tensorflow.keras.backend.minimum', 'K.minimum', (['b_true_mins', 'b_pred_mins'], {}), '(b_true_mins, b_pred_mins)\n', (7370, 7396), True, 'from tensorflow.keras import backend as K\n'), ((7417, 7454), 'tensorflow.keras.backend.maximum', 'K.maximum', (['b_true_maxes', 'b_pred_maxes'], {}), '(b_true_maxes, b_pred_maxes)\n', (7426, 7454), True, 'from tensorflow.keras import backend as K\n'), ((7472, 7516), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(enclose_maxes - enclose_mins)', '(0.0)'], {}), '(enclose_maxes - enclose_mins, 0.0)\n', (7481, 7516), True, 'from tensorflow.keras import backend as K\n'), ((9004, 9027), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['diou', '(-1)'], {}), '(diou, -1)\n', (9017, 9027), True, 'from tensorflow.keras import backend as K\n'), ((18759, 18787), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['loss'], {'axis': '(-1)'}), '(loss, axis=-1)\n', (18772, 18787), True, 'from tensorflow.keras import backend as K\n'), ((1117, 1136), 'tensorflow.keras.backend.constant', 'K.constant', (['anchors'], {}), '(anchors)\n', (1127, 1136), True, 'from tensorflow.keras import backend as K\n'), ((1341, 1355), 'tensorflow.keras.backend.shape', 'K.shape', (['feats'], {}), '(feats)\n', (1348, 1355), True, 'from tensorflow.keras import backend as K\n'), ((1695, 1709), 'tensorflow.keras.backend.dtype', 'K.dtype', (['feats'], {}), '(feats)\n', (1702, 1709), True, 'from tensorflow.keras import backend as K\n'), ((7274, 7305), 'tensorflow.keras.backend.square', 'K.square', (['(b_true_xy - b_pred_xy)'], {}), '(b_true_xy - b_pred_xy)\n', (7282, 7305), True, 'from tensorflow.keras import backend as K\n'), ((7560, 7580), 'tensorflow.keras.backend.square', 'K.square', (['enclose_wh'], {}), '(enclose_wh)\n', (7568, 7580), True, 'from tensorflow.keras import backend as K\n'), ((9048, 9068), 'tensorflow.math.is_nan', 'tf.math.is_nan', (['diou'], {}), '(diou)\n', (9062, 9068), True, 'import tensorflow as tf\n'), ((9070, 9089), 'tensorflow.zeros_like', 'tf.zeros_like', (['diou'], {}), '(diou)\n', (9083, 9089), True, 'import tensorflow as tf\n'), ((12898, 12922), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (12905, 12922), True, 'from tensorflow.keras import backend as K\n'), ((12945, 12969), 'tensorflow.keras.backend.dtype', 'K.dtype', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (12952, 12969), True, 'from tensorflow.keras import backend as K\n'), ((13984, 14017), 'tensorflow.keras.backend.concatenate', 'K.concatenate', (['[pred_xy, pred_wh]'], {}), '([pred_xy, pred_wh])\n', (13997, 14017), True, 'from tensorflow.keras import backend as K\n'), ((14306, 14333), 'tensorflow.keras.backend.cast', 'K.cast', (['object_mask', '"""bool"""'], {}), "(object_mask, 'bool')\n", (14312, 14333), True, 'from tensorflow.keras import backend as K\n'), ((15469, 15535), 'tensorflow.while_loop', 'tf.while_loop', (['(lambda b, *args: b < m)', 'loop_body', '[0, ignore_mask]'], {}), '(lambda b, *args: b < m, loop_body, [0, ignore_mask])\n', (15482, 15535), True, 'import tensorflow as tf\n'), 
((15637, 15667), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['ignore_mask', '(-1)'], {}), '(ignore_mask, -1)\n', (15650, 15667), True, 'from tensorflow.keras import backend as K\n'), ((18487, 18507), 'tensorflow.keras.backend.sum', 'K.sum', (['location_loss'], {}), '(location_loss)\n', (18492, 18507), True, 'from tensorflow.keras import backend as K\n'), ((18534, 18556), 'tensorflow.keras.backend.sum', 'K.sum', (['confidence_loss'], {}), '(confidence_loss)\n', (18539, 18556), True, 'from tensorflow.keras import backend as K\n'), ((18578, 18595), 'tensorflow.keras.backend.sum', 'K.sum', (['class_loss'], {}), '(class_loss)\n', (18583, 18595), True, 'from tensorflow.keras import backend as K\n'), ((515, 530), 'tensorflow.keras.backend.shape', 'K.shape', (['y_true'], {}), '(y_true)\n', (522, 530), True, 'from tensorflow.keras import backend as K\n'), ((542, 552), 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (550, 552), True, 'from tensorflow.keras import backend as K\n'), ((610, 620), 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (618, 620), True, 'from tensorflow.keras import backend as K\n'), ((1408, 1439), 'tensorflow.keras.backend.arange', 'K.arange', (['(0)'], {'stop': 'grid_shape[0]'}), '(0, stop=grid_shape[0])\n', (1416, 1439), True, 'from tensorflow.keras import backend as K\n'), ((1533, 1564), 'tensorflow.keras.backend.arange', 'K.arange', (['(0)'], {'stop': 'grid_shape[1]'}), '(0, stop=grid_shape[1])\n', (1541, 1564), True, 'from tensorflow.keras import backend as K\n'), ((2248, 2273), 'tensorflow.keras.backend.sigmoid', 'K.sigmoid', (['feats[..., :2]'], {}), '(feats[..., :2])\n', (2257, 2273), True, 'from tensorflow.keras import backend as K\n'), ((2314, 2328), 'tensorflow.keras.backend.dtype', 'K.dtype', (['feats'], {}), '(feats)\n', (2321, 2328), True, 'from tensorflow.keras import backend as K\n'), ((2343, 2365), 'tensorflow.keras.backend.exp', 'K.exp', (['feats[..., 2:4]'], {}), '(feats[..., 2:4])\n', (2348, 2365), True, 'from tensorflow.keras import backend as K\n'), ((2416, 2430), 'tensorflow.keras.backend.dtype', 'K.dtype', (['feats'], {}), '(feats)\n', (2423, 2430), True, 'from tensorflow.keras import backend as K\n'), ((5527, 5538), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (5536, 5538), True, 'from tensorflow.keras import backend as K\n'), ((7218, 7229), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (7227, 7229), True, 'from tensorflow.keras import backend as K\n'), ((8804, 8903), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(b_pred_wh[..., 0] * b_pred_wh[..., 0] + b_pred_wh[..., 1] * b_pred_wh[..., 1])'], {}), '(b_pred_wh[..., 0] * b_pred_wh[..., 0] + b_pred_wh[..., 1] *\n b_pred_wh[..., 1])\n', (8820, 8903), True, 'import tensorflow as tf\n'), ((11336, 11354), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (11343, 11354), True, 'from tensorflow.keras import backend as K\n'), ((12812, 12830), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[l]'], {}), '(y_true[l])\n', (12819, 12830), True, 'from tensorflow.keras import backend as K\n'), ((14232, 14250), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (14239, 14250), True, 'from tensorflow.keras import backend as K\n'), ((14453, 14521), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['y_true[l][b, ..., 0:4]', 'object_mask_bool[b, ..., 0]'], {}), '(y_true[l][b, ..., 0:4], object_mask_bool[b, ..., 0])\n', (14468, 14521), True, 'import tensorflow 
as tf\n'), ((14912, 14931), 'tensorflow.keras.backend.max', 'K.max', (['iou'], {'axis': '(-1)'}), '(iou, axis=-1)\n', (14917, 14931), True, 'from tensorflow.keras import backend as K\n'), ((16528, 16604), 'tensorflow.keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['true_class_probs', 'raw_pred[..., 5:]'], {'from_logits': '(True)'}), '(true_class_probs, raw_pred[..., 5:], from_logits=True)\n', (16549, 16604), True, 'from tensorflow.keras import backend as K\n'), ((16881, 16897), 'tensorflow.keras.backend.sum', 'K.sum', (['giou_loss'], {}), '(giou_loss)\n', (16886, 16897), True, 'from tensorflow.keras import backend as K\n'), ((5940, 5951), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (5949, 5951), True, 'from tensorflow.keras import backend as K\n'), ((7662, 7673), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (7671, 7673), True, 'from tensorflow.keras import backend as K\n'), ((8947, 8958), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (8956, 8958), True, 'from tensorflow.keras import backend as K\n'), ((11492, 11510), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (11499, 11510), True, 'from tensorflow.keras import backend as K\n'), ((12781, 12805), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[l]'], {}), '(yolo_outputs[l])\n', (12788, 12805), True, 'from tensorflow.keras import backend as K\n'), ((16215, 16287), 'tensorflow.keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['object_mask', 'raw_pred[..., 4:5]'], {'from_logits': '(True)'}), '(object_mask, raw_pred[..., 4:5], from_logits=True)\n', (16236, 16287), True, 'from tensorflow.keras import backend as K\n'), ((18650, 18681), 'tensorflow.keras.backend.cast', 'K.cast', (['object_mask', 'tf.float32'], {}), '(object_mask, tf.float32)\n', (18656, 18681), True, 'from tensorflow.keras import backend as K\n'), ((11302, 11326), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (11309, 11326), True, 'from tensorflow.keras import backend as K\n'), ((11644, 11662), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (11651, 11662), True, 'from tensorflow.keras import backend as K\n'), ((15350, 15367), 'tensorflow.keras.backend.dtype', 'K.dtype', (['true_box'], {}), '(true_box)\n', (15357, 15367), True, 'from tensorflow.keras import backend as K\n'), ((16338, 16410), 'tensorflow.keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['object_mask', 'raw_pred[..., 4:5]'], {'from_logits': '(True)'}), '(object_mask, raw_pred[..., 4:5], from_logits=True)\n', (16359, 16410), True, 'from tensorflow.keras import backend as K\n'), ((17783, 17855), 'tensorflow.keras.backend.log', 'K.log', (['(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])'], {}), '(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])\n', (17788, 17855), True, 'from tensorflow.keras import backend as K\n'), ((18365, 18379), 'tensorflow.keras.backend.sum', 'K.sum', (['xy_loss'], {}), '(xy_loss)\n', (18370, 18379), True, 'from tensorflow.keras import backend as K\n'), ((18402, 18416), 'tensorflow.keras.backend.sum', 'K.sum', (['wh_loss'], {}), '(wh_loss)\n', (18407, 18416), True, 'from tensorflow.keras import backend as K\n'), ((11458, 11482), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (11465, 11482), True, 'from tensorflow.keras import backend as K\n'), ((11800, 11818), 
'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (11807, 11818), True, 'from tensorflow.keras import backend as K\n'), ((17917, 17942), 'tensorflow.keras.backend.zeros_like', 'K.zeros_like', (['raw_true_wh'], {}), '(raw_true_wh)\n', (17929, 17942), True, 'from tensorflow.keras import backend as K\n'), ((18093, 18165), 'tensorflow.keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['raw_true_xy', 'raw_pred[..., 0:2]'], {'from_logits': '(True)'}), '(raw_true_xy, raw_pred[..., 0:2], from_logits=True)\n', (18114, 18165), True, 'from tensorflow.keras import backend as K\n'), ((18300, 18342), 'tensorflow.keras.backend.square', 'K.square', (['(raw_true_wh - raw_pred[..., 2:4])'], {}), '(raw_true_wh - raw_pred[..., 2:4])\n', (18308, 18342), True, 'from tensorflow.keras import backend as K\n'), ((11610, 11634), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (11617, 11634), True, 'from tensorflow.keras import backend as K\n'), ((11955, 11973), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (11962, 11973), True, 'from tensorflow.keras import backend as K\n'), ((7785, 7796), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (7794, 7796), True, 'from tensorflow.keras import backend as K\n'), ((7886, 7897), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (7895, 7897), True, 'from tensorflow.keras import backend as K\n'), ((11766, 11790), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (11773, 11790), True, 'from tensorflow.keras import backend as K\n'), ((12110, 12128), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (12117, 12128), True, 'from tensorflow.keras import backend as K\n'), ((11921, 11945), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (11928, 11945), True, 'from tensorflow.keras import backend as K\n'), ((12266, 12284), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (12273, 12284), True, 'from tensorflow.keras import backend as K\n'), ((12076, 12100), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (12083, 12100), True, 'from tensorflow.keras import backend as K\n'), ((12423, 12441), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (12430, 12441), True, 'from tensorflow.keras import backend as K\n'), ((12231, 12255), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (12238, 12255), True, 'from tensorflow.keras import backend as K\n'), ((12580, 12598), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (12587, 12598), True, 'from tensorflow.keras import backend as K\n'), ((12735, 12753), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_true[0]'], {}), '(y_true[0])\n', (12742, 12753), True, 'from tensorflow.keras import backend as K\n'), ((12389, 12413), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (12396, 12413), True, 'from tensorflow.keras import backend as K\n'), ((12546, 12570), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', (12553, 12570), True, 'from tensorflow.keras import backend as K\n'), ((12701, 12725), 'tensorflow.keras.backend.shape', 'K.shape', (['yolo_outputs[0]'], {}), '(yolo_outputs[0])\n', 
(12708, 12725), True, 'from tensorflow.keras import backend as K\n')]
|
#!/usr/bin/env python
"""Virtual filesystem module based on pyfsntfs."""
import stat
from typing import Any, Callable, Dict, Iterable, Optional, Text, Type
import pyfsntfs
from grr_response_client import client_utils
from grr_response_client.vfs_handlers import base as vfs_base
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
# Caches pyfsntfs.volume instances.
MOUNT_CACHE = utils.TimeBasedCache()
# See
# https://github.com/libyal/libfsntfs/blob/master/documentation/New%20Technologies%20File%20System%20(NTFS).asciidoc#file_attribute_flags
FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_HIDDEN = 0x00000002
def _GetAlternateDataStreamCaseInsensitive(
fd: pyfsntfs.file_entry, name: Text) -> Optional[pyfsntfs.data_stream]:
name = name.lower()
for data_stream in fd.alternate_data_streams:
if data_stream.name.lower() == name:
return data_stream
class NTFSFile(vfs_base.VFSHandler):
"""VFSHandler implementation based on pyfsntfs."""
supported_pathtype = rdf_paths.PathSpec.PathType.NTFS
def __init__(self,
base_fd: Optional[vfs_base.VFSHandler],
handlers: Dict[Any, Type[vfs_base.VFSHandler]],
pathspec: Optional[rdf_paths.PathSpec] = None,
progress_callback: Optional[Callable[[], None]] = None):
super().__init__(
base_fd,
handlers=handlers,
pathspec=pathspec,
progress_callback=progress_callback)
# self.pathspec is initialized to a copy of base_fd
if base_fd is None:
raise ValueError("NTFS driver must have a file base.")
elif isinstance(base_fd, NTFSFile) and base_fd.IsDirectory():
self.volume = base_fd.volume
last_path = utils.JoinPath(self.pathspec.last.path, pathspec.path)
# Replace the last component with this one.
self.pathspec.Pop(-1)
self.pathspec.Append(pathspec)
self.pathspec.last.path = last_path
elif not base_fd.IsDirectory():
cache_key = base_fd.pathspec.SerializeToBytes()
try:
self.volume = MOUNT_CACHE.Get(cache_key)
except KeyError:
self.volume = pyfsntfs.volume()
self.volume.open_file_object(base_fd)
MOUNT_CACHE.Put(cache_key, self.volume)
self.pathspec.Append(pathspec)
elif base_fd.IsDirectory():
raise IOError("Base must be a file.")
self.fd = None
self.data_stream = None
# Try to open by "inode" number.
if pathspec is not None and pathspec.HasField("inode"):
# The lower 48 bits of the file_reference are the MFT index.
mft_index = pathspec.inode & ((1 << 48) - 1)
self.fd = self.volume.get_file_entry(mft_index)
# If the file_reference changed, then the MFT entry points now to
# a different file. Reopen it by path.
if self.fd is not None and self.fd.file_reference != pathspec.inode:
self.fd = None
# Try to open by path
if self.fd is None:
path = self.pathspec.last.path
path = path.replace("/", "\\")
self.fd = self.volume.get_file_entry_by_path(path)
if self.fd is None:
raise IOError("Failed to open {}".format(path))
# Determine data stream
if pathspec is not None and pathspec.HasField("stream_name"):
if pathspec.path_options == rdf_paths.PathSpec.Options.CASE_LITERAL:
self.data_stream = self.fd.get_alternate_data_stream_by_name(
pathspec.stream_name)
else:
self.data_stream = _GetAlternateDataStreamCaseInsensitive(
self.fd, pathspec.stream_name)
if self.data_stream is None:
raise IOError("Failed to open data stream {} in {}.".format(
pathspec.stream_name, path))
self.pathspec.last.stream_name = self.data_stream.name
else:
if self.fd.has_default_data_stream():
self.data_stream = self.fd
# self.pathspec will be used for future access to this file.
# The name is now literal, so disable case-insensitive lookup (expensive).
self.pathspec.last.path_options = rdf_paths.PathSpec.Options.CASE_LITERAL
# Access the file by file_reference, to skip path lookups.
self.pathspec.last.inode = self.fd.file_reference
if not self.IsDirectory():
if self.data_stream is not None:
self.size = self.data_stream.get_size()
else:
self.size = 0
def Stat(self,
ext_attrs: bool = False,
follow_symlink: bool = True) -> rdf_client_fs.StatEntry:
return self._Stat(self.fd, self.data_stream, self.pathspec.Copy())
def Read(self, length: int) -> bytes:
self.data_stream.seek(self.offset)
data = self.data_stream.read(length)
self.offset += len(data)
return data
def IsDirectory(self) -> bool:
return self.fd.has_directory_entries_index()
def ListFiles(self,
ext_attrs: bool = False) -> Iterable[rdf_client_fs.StatEntry]:
del ext_attrs # Unused.
self._CheckIsDirectory()
for entry in self.fd.sub_file_entries:
pathspec = self.pathspec.Copy()
pathspec.last.path = utils.JoinPath(pathspec.last.path, entry.name)
pathspec.last.inode = entry.file_reference
      pathspec.last.path_options = rdf_paths.PathSpec.Options.CASE_LITERAL
data_stream = entry if entry.has_default_data_stream() else None
yield self._Stat(entry, data_stream, pathspec.Copy())
# Create extra entries for alternate data streams
for data_stream in entry.alternate_data_streams:
pathspec.last.stream_name = data_stream.name
yield self._Stat(entry, data_stream, pathspec.Copy())
def ListNames(self) -> Iterable[Text]:
self._CheckIsDirectory()
for entry in self.fd.sub_file_entries:
yield entry.name
def _CheckIsDirectory(self) -> None:
if not self.IsDirectory():
raise IOError("{} is not a directory".format(
self.pathspec.CollapsePath()))
def _Stat(
self,
entry: pyfsntfs.file_entry,
data_stream: pyfsntfs.data_stream,
pathspec: rdf_paths.PathSpec,
) -> rdf_client_fs.StatEntry:
st = rdf_client_fs.StatEntry()
st.pathspec = pathspec
st.st_atime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_access_time())
st.st_mtime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_modification_time())
st.st_btime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_creation_time())
st.st_ctime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_entry_modification_time())
if entry.has_directory_entries_index():
st.st_mode = stat.S_IFDIR
else:
st.st_mode = stat.S_IFREG
if data_stream is not None:
st.st_size = data_stream.get_size()
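    # Map the NTFS attribute flags onto POSIX mode bits: the execute bits are always
    # set (NTFS has no equivalent), write bits are granted unless READONLY is set,
    # and read bits unless HIDDEN is set.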
flags = entry.file_attribute_flags
st.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if (flags & FILE_ATTRIBUTE_READONLY) == 0:
st.st_mode |= stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
if (flags & FILE_ATTRIBUTE_HIDDEN) == 0:
st.st_mode |= stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
return st
@classmethod
def Open(
cls,
fd: Optional[vfs_base.VFSHandler],
component: rdf_paths.PathSpec,
handlers: Dict[Any, Type[vfs_base.VFSHandler]],
pathspec: Optional[rdf_paths.PathSpec] = None,
progress_callback: Optional[Callable[[], None]] = None
) -> Optional[vfs_base.VFSHandler]:
# A Pathspec which starts with NTFS means we need to resolve the mount
# point at runtime.
if (fd is None and
component.pathtype == rdf_paths.PathSpec.PathType.NTFS and
pathspec is not None):
# We are the top level handler. This means we need to check the system
# mounts to work out the exact mount point and device we need to
# open. We then modify the pathspec so we get nested in the raw
# pathspec.
raw_pathspec, corrected_path = client_utils.GetRawDevice(component.path) # pytype: disable=attribute-error
# Insert the raw device before the component in the pathspec and correct
# the path
component.path = corrected_path
pathspec.Insert(0, component)
pathspec.Insert(0, raw_pathspec)
# Allow incoming pathspec to be given in the local system path
# conventions.
for component in pathspec:
if component.path:
component.path = client_utils.LocalPathToCanonicalPath(component.path)
# We have not actually opened anything in this iteration, but modified the
# pathspec. Next time we should be able to open it properly.
return fd
# If an inode is specified, just use it directly.
# This is necessary so that component.path is ignored.
elif component.HasField("inode"):
return NTFSFile(
fd, handlers, component, progress_callback=progress_callback)
else:
return super(NTFSFile, cls).Open(
fd=fd,
component=component,
handlers=handlers,
pathspec=pathspec,
progress_callback=progress_callback)
|
[
"grr_response_client.client_utils.GetRawDevice",
"grr_response_core.lib.utils.TimeBasedCache",
"grr_response_core.lib.rdfvalues.client_fs.StatEntry",
"grr_response_client.client_utils.LocalPathToCanonicalPath",
"grr_response_core.lib.utils.JoinPath",
"pyfsntfs.volume"
] |
[((551, 573), 'grr_response_core.lib.utils.TimeBasedCache', 'utils.TimeBasedCache', ([], {}), '()\n', (571, 573), False, 'from grr_response_core.lib import utils\n'), ((6200, 6225), 'grr_response_core.lib.rdfvalues.client_fs.StatEntry', 'rdf_client_fs.StatEntry', ([], {}), '()\n', (6223, 6225), True, 'from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs\n'), ((5197, 5243), 'grr_response_core.lib.utils.JoinPath', 'utils.JoinPath', (['pathspec.last.path', 'entry.name'], {}), '(pathspec.last.path, entry.name)\n', (5211, 5243), False, 'from grr_response_core.lib import utils\n'), ((7990, 8031), 'grr_response_client.client_utils.GetRawDevice', 'client_utils.GetRawDevice', (['component.path'], {}), '(component.path)\n', (8015, 8031), False, 'from grr_response_client import client_utils\n'), ((1873, 1927), 'grr_response_core.lib.utils.JoinPath', 'utils.JoinPath', (['self.pathspec.last.path', 'pathspec.path'], {}), '(self.pathspec.last.path, pathspec.path)\n', (1887, 1927), False, 'from grr_response_core.lib import utils\n'), ((8455, 8508), 'grr_response_client.client_utils.LocalPathToCanonicalPath', 'client_utils.LocalPathToCanonicalPath', (['component.path'], {}), '(component.path)\n', (8492, 8508), False, 'from grr_response_client import client_utils\n'), ((2280, 2297), 'pyfsntfs.volume', 'pyfsntfs.volume', ([], {}), '()\n', (2295, 2297), False, 'import pyfsntfs\n')]
|
from encapsulation_04.exe.pizza_maker.project.dough import Dough
from encapsulation_04.exe.pizza_maker.project.pizza import Pizza
from encapsulation_04.exe.pizza_maker.project.topping import Topping
tomato_topping = Topping("Tomato", 60)
print(tomato_topping.topping_type)
print(tomato_topping.weight)
mushrooms_topping = Topping("Mushroom", 75)
print(mushrooms_topping.topping_type)
print(mushrooms_topping.weight)
mozzarella_topping = Topping("Mozzarella", 80)
print(mozzarella_topping.topping_type)
print(mozzarella_topping.weight)
cheddar_topping = Topping("Cheddar", 150)
pepperoni_topping = Topping("Pepperoni", 120)
white_flour_dough = Dough("White Flour", "Mixing", 200)
print(white_flour_dough.flour_type)
print(white_flour_dough.weight)
print(white_flour_dough.baking_technique)
whole_wheat_dough = Dough("Whole Wheat Flour", "Mixing", 200)
print(whole_wheat_dough.weight)
print(whole_wheat_dough.flour_type)
print(whole_wheat_dough.baking_technique)
p = Pizza("Margherita", whole_wheat_dough, 2)
p.add_topping(tomato_topping)
print(p.calculate_total_weight())
p.add_topping(mozzarella_topping)
print(p.calculate_total_weight())
p.add_topping(mozzarella_topping)
|
[
"encapsulation_04.exe.pizza_maker.project.dough.Dough",
"encapsulation_04.exe.pizza_maker.project.pizza.Pizza",
"encapsulation_04.exe.pizza_maker.project.topping.Topping"
] |
[((217, 238), 'encapsulation_04.exe.pizza_maker.project.topping.Topping', 'Topping', (['"""Tomato"""', '(60)'], {}), "('Tomato', 60)\n", (224, 238), False, 'from encapsulation_04.exe.pizza_maker.project.topping import Topping\n'), ((324, 347), 'encapsulation_04.exe.pizza_maker.project.topping.Topping', 'Topping', (['"""Mushroom"""', '(75)'], {}), "('Mushroom', 75)\n", (331, 347), False, 'from encapsulation_04.exe.pizza_maker.project.topping import Topping\n'), ((440, 465), 'encapsulation_04.exe.pizza_maker.project.topping.Topping', 'Topping', (['"""Mozzarella"""', '(80)'], {}), "('Mozzarella', 80)\n", (447, 465), False, 'from encapsulation_04.exe.pizza_maker.project.topping import Topping\n'), ((557, 580), 'encapsulation_04.exe.pizza_maker.project.topping.Topping', 'Topping', (['"""Cheddar"""', '(150)'], {}), "('Cheddar', 150)\n", (564, 580), False, 'from encapsulation_04.exe.pizza_maker.project.topping import Topping\n'), ((602, 627), 'encapsulation_04.exe.pizza_maker.project.topping.Topping', 'Topping', (['"""Pepperoni"""', '(120)'], {}), "('Pepperoni', 120)\n", (609, 627), False, 'from encapsulation_04.exe.pizza_maker.project.topping import Topping\n'), ((649, 684), 'encapsulation_04.exe.pizza_maker.project.dough.Dough', 'Dough', (['"""White Flour"""', '"""Mixing"""', '(200)'], {}), "('White Flour', 'Mixing', 200)\n", (654, 684), False, 'from encapsulation_04.exe.pizza_maker.project.dough import Dough\n'), ((816, 857), 'encapsulation_04.exe.pizza_maker.project.dough.Dough', 'Dough', (['"""Whole Wheat Flour"""', '"""Mixing"""', '(200)'], {}), "('Whole Wheat Flour', 'Mixing', 200)\n", (821, 857), False, 'from encapsulation_04.exe.pizza_maker.project.dough import Dough\n'), ((973, 1014), 'encapsulation_04.exe.pizza_maker.project.pizza.Pizza', 'Pizza', (['"""Margherita"""', 'whole_wheat_dough', '(2)'], {}), "('Margherita', whole_wheat_dough, 2)\n", (978, 1014), False, 'from encapsulation_04.exe.pizza_maker.project.pizza import Pizza\n')]
|
import cv2
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
def to_cpu(tensor):
return tensor.detach().cpu()
def xywh2xyxy(x):
    ''' Convert bounding boxes from [center_x, center_y, w, h] to [x1, y1, x2, y2]
:param x: bounding boxes array
:return: Converted bounding box array
'''
y = x.new(x.shape)
y[..., 0] = x[..., 0] - x[..., 2] / 2
y[..., 1] = x[..., 1] - x[..., 3] / 2
y[..., 2] = x[..., 0] + x[..., 2] / 2
y[..., 3] = x[..., 1] + x[..., 3] / 2
return y
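# xywh2xyxy example: a box [center_x, center_y, w, h] = [50, 50, 20, 10]
# becomes corner form [x1, y1, x2, y2] = [40, 45, 60, 55].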
def bbox_iou(box1, box2, x1y1x2y2=True):
"""
Returns the IoU of two bounding boxes
"""
if not x1y1x2y2:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
else:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    # Get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
inter_rect_y2 - inter_rect_y1 + 1, min=0
)
# Union Area
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
return iou
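# Note on bbox_iou: the +1 terms follow the inclusive-pixel convention, so two
# identical boxes score an IoU of ~1.0 (the 1e-16 only prevents division by zero).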
def rescale_boxes(boxes, current_dim, original_shape):
""" Rescales bounding boxes to the original shape """
orig_h, orig_w = original_shape
# The amount of padding that was added
pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))
pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))
# Image height and width after padding is removed
unpad_h = current_dim - pad_y
unpad_w = current_dim - pad_x
# Rescale bounding boxes to dimension of original image
boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h
boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h
return boxes
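# rescale_boxes example (416x416 letterbox assumed): for an original 400x300 (w x h)
# image, pad_x = 0 and pad_y = 104, so boxes are shifted up by 52 px and rescaled by
# 400/416 horizontally and 300/312 vertically back into image coordinates.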
def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):
"""
Removes detections with lower object confidence score than 'conf_thres' and performs
Non-Maximum Suppression to further filter detections.
Returns detections with shape:
(x1, y1, x2, y2, object_conf, class_score, class_pred)
"""
# From (center x, center y, width, height) to (x1, y1, x2, y2)
prediction[..., :4] = xywh2xyxy(prediction[..., :4])
output = [None for _ in range(len(prediction))]
for image_i, image_pred in enumerate(prediction):
# Filter out confidence scores below threshold
image_pred = image_pred[image_pred[:, 4] >= conf_thres]
# If none are remaining => process next image
if not image_pred.size(0):
continue
# Object confidence times class confidence
score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]
# Sort by it
image_pred = image_pred[(-score).argsort()]
class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)
detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1)
# Perform non-maximum suppression
keep_boxes = []
while detections.size(0):
large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres
label_match = detections[0, -1] == detections[:, -1]
# Indices of boxes with lower confidence scores, large IOUs and matching labels
invalid = large_overlap & label_match
weights = detections[invalid, 4:5]
# Merge overlapping bboxes by order of confidence
detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()
keep_boxes += [detections[0]]
detections = detections[~invalid]
if keep_boxes:
output[image_i] = torch.stack(keep_boxes)
return output
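# Usage sketch for non_max_suppression (assumes a Darknet model defined below and a
# letterboxed 416x416 image tensor):
#   with torch.no_grad():
#       detections = model(img_tensor)      # (1, num_boxes, 5 + num_classes)
#   detections = non_max_suppression(detections, conf_thres=0.5, nms_thres=0.4)[0]
#   if detections is not None:
#       detections = rescale_boxes(detections, 416, original_image.shape[:2])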
def parse_model_config(path):
"""Parses the yolo-v3 layer configuration file and returns module definitions"""
    # Use a context manager so the config file is closed after reading
    with open(path, 'r') as file:
        lines = file.read().split('\n')
lines = [x for x in lines if x and not x.startswith('#')]
    lines = [x.rstrip().lstrip() for x in lines]  # strip leading/trailing whitespace
module_defs = []
for line in lines:
if line.startswith('['): # This marks the start of a new block
module_defs.append({})
module_defs[-1]['type'] = line[1:-1].rstrip()
if module_defs[-1]['type'] == 'convolutional':
module_defs[-1]['batch_normalize'] = 0
else:
key, value = line.split("=")
value = value.strip()
module_defs[-1][key.rstrip()] = value.strip()
return module_defs
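# parse_model_config example: a cfg fragment such as
#   [convolutional]
#   batch_normalize=1
#   filters=32
# parses to [{'type': 'convolutional', 'batch_normalize': '1', 'filters': '32'}];
# values stay strings, and create_modules() below converts them with int().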
def parse_data_config(path):
"""Parses the data configuration file"""
options = dict()
options['gpus'] = '0,1,2,3'
options['num_workers'] = '10'
with open(path, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '' or line.startswith('#'):
continue
key, value = line.split('=')
options[key.strip()] = value.strip()
return options
def create_modules(module_defs):
"""
Constructs module list of layer blocks from module configuration in module_defs
"""
hyperparams = module_defs.pop(0)
output_filters = [int(hyperparams["channels"])]
module_list = nn.ModuleList()
for module_i, module_def in enumerate(module_defs):
modules = nn.Sequential()
if module_def["type"] == "convolutional":
bn = int(module_def["batch_normalize"])
filters = int(module_def["filters"])
kernel_size = int(module_def["size"])
pad = (kernel_size - 1) // 2
modules.add_module(
f"conv_{module_i}",
nn.Conv2d(
in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(module_def["stride"]),
padding=pad,
bias=not bn,
),
)
if bn:
modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5))
if module_def["activation"] == "leaky":
modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))
elif module_def["type"] == "maxpool":
kernel_size = int(module_def["size"])
stride = int(module_def["stride"])
if kernel_size == 2 and stride == 1:
modules.add_module(f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))
maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
modules.add_module(f"maxpool_{module_i}", maxpool)
elif module_def["type"] == "upsample":
upsample = Upsample(scale_factor=int(module_def["stride"]), mode="nearest")
modules.add_module(f"upsample_{module_i}", upsample)
elif module_def["type"] == "route":
layers = [int(x) for x in module_def["layers"].split(",")]
filters = sum([output_filters[1:][i] for i in layers])
modules.add_module(f"route_{module_i}", EmptyLayer())
elif module_def["type"] == "shortcut":
filters = output_filters[1:][int(module_def["from"])]
modules.add_module(f"shortcut_{module_i}", EmptyLayer())
elif module_def["type"] == "yolo":
anchor_idxs = [int(x) for x in module_def["mask"].split(",")]
# Extract anchors
anchors = [int(x) for x in module_def["anchors"].split(",")]
anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
anchors = [anchors[i] for i in anchor_idxs]
num_classes = int(module_def["classes"])
img_size = int(hyperparams["height"])
# Define detection layer
yolo_layer = YOLOLayer(anchors, num_classes, img_size)
modules.add_module(f"yolo_{module_i}", yolo_layer)
# Register module list and number of output filters
module_list.append(modules)
output_filters.append(filters)
return hyperparams, module_list
class Upsample(nn.Module):
""" nn.Upsample is deprecated """
def __init__(self, scale_factor, mode="nearest"):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
return x
class EmptyLayer(nn.Module):
"""Placeholder for 'route' and 'shortcut' layers"""
def __init__(self):
super(EmptyLayer, self).__init__()
class YOLOLayer(nn.Module):
"""Detection layer"""
def __init__(self, anchors, num_classes, img_dim=416):
super(YOLOLayer, self).__init__()
self.anchors = anchors
self.num_anchors = len(anchors)
self.num_classes = num_classes
self.ignore_thres = 0.5
self.mse_loss = nn.MSELoss()
self.bce_loss = nn.BCELoss()
self.obj_scale = 1
self.noobj_scale = 100
self.metrics = {}
self.img_dim = img_dim
self.grid_size = 0 # grid size
def compute_grid_offsets(self, grid_size, cuda=True):
self.grid_size = grid_size
g = self.grid_size
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
self.stride = self.img_dim / self.grid_size
# Calculate offsets for each grid
self.grid_x = torch.arange(g).repeat(g, 1).view([1, 1, g, g]).type(FloatTensor)
self.grid_y = torch.arange(g).repeat(g, 1).t().view([1, 1, g, g]).type(FloatTensor)
self.scaled_anchors = FloatTensor([(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])
self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.num_anchors, 1, 1))
self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.num_anchors, 1, 1))
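    # Example: with img_dim=416 and grid_size=13 the stride is 32, and grid_x/grid_y
    # hold the per-cell offsets that forward() adds to the sigmoid x/y predictions.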
def forward(self, x, targets=None, img_dim=None):
# Tensors for cuda support
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor
self.img_dim = img_dim
num_samples = x.size(0)
grid_size = x.size(2)
prediction = (
x.view(num_samples, self.num_anchors, self.num_classes + 5, grid_size, grid_size)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
pred_conf = torch.sigmoid(prediction[..., 4]) # Conf
pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.
# If grid size does not match current we compute new offsets
if grid_size != self.grid_size:
self.compute_grid_offsets(grid_size, cuda=x.is_cuda)
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + self.grid_x
pred_boxes[..., 1] = y.data + self.grid_y
pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h
output = torch.cat(
(
pred_boxes.view(num_samples, -1, 4) * self.stride,
pred_conf.view(num_samples, -1, 1),
pred_cls.view(num_samples, -1, self.num_classes),
),
-1,
)
if targets is None:
return output, 0
else:
iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets(
pred_boxes=pred_boxes,
pred_cls=pred_cls,
target=targets,
anchors=self.scaled_anchors,
ignore_thres=self.ignore_thres,
)
# Loss : Mask outputs to ignore non-existing objects (except with conf. loss)
loss_x = self.mse_loss(x[obj_mask], tx[obj_mask])
loss_y = self.mse_loss(y[obj_mask], ty[obj_mask])
loss_w = self.mse_loss(w[obj_mask], tw[obj_mask])
loss_h = self.mse_loss(h[obj_mask], th[obj_mask])
loss_conf_obj = self.bce_loss(pred_conf[obj_mask], tconf[obj_mask])
loss_conf_noobj = self.bce_loss(pred_conf[noobj_mask], tconf[noobj_mask])
loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj
loss_cls = self.bce_loss(pred_cls[obj_mask], tcls[obj_mask])
total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls
# Metrics
cls_acc = 100 * class_mask[obj_mask].mean()
conf_obj = pred_conf[obj_mask].mean()
conf_noobj = pred_conf[noobj_mask].mean()
conf50 = (pred_conf > 0.5).float()
iou50 = (iou_scores > 0.5).float()
iou75 = (iou_scores > 0.75).float()
detected_mask = conf50 * class_mask * tconf
precision = torch.sum(iou50 * detected_mask) / (conf50.sum() + 1e-16)
recall50 = torch.sum(iou50 * detected_mask) / (obj_mask.sum() + 1e-16)
recall75 = torch.sum(iou75 * detected_mask) / (obj_mask.sum() + 1e-16)
self.metrics = {
"loss": to_cpu(total_loss).item(),
"x": to_cpu(loss_x).item(),
"y": to_cpu(loss_y).item(),
"w": to_cpu(loss_w).item(),
"h": to_cpu(loss_h).item(),
"conf": to_cpu(loss_conf).item(),
"cls": to_cpu(loss_cls).item(),
"cls_acc": to_cpu(cls_acc).item(),
"recall50": to_cpu(recall50).item(),
"recall75": to_cpu(recall75).item(),
"precision": to_cpu(precision).item(),
"conf_obj": to_cpu(conf_obj).item(),
"conf_noobj": to_cpu(conf_noobj).item(),
"grid_size": grid_size,
}
return output, total_loss
class Darknet(nn.Module):
"""YOLOv3 object detection model"""
def __init__(self, config_path, img_size=416):
super(Darknet, self).__init__()
self.module_defs = parse_model_config(config_path)
self.hyperparams, self.module_list = create_modules(self.module_defs)
self.yolo_layers = [layer[0] for layer in self.module_list if hasattr(layer[0], "metrics")]
self.img_size = img_size
self.seen = 0
self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
def forward(self, x, targets=None):
img_dim = x.shape[2]
loss = 0
layer_outputs, yolo_outputs = [], []
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
if module_def["type"] in ["convolutional", "upsample", "maxpool"]:
x = module(x)
elif module_def["type"] == "route":
x = torch.cat([layer_outputs[int(layer_i)] for layer_i in module_def["layers"].split(",")], 1)
elif module_def["type"] == "shortcut":
layer_i = int(module_def["from"])
x = layer_outputs[-1] + layer_outputs[layer_i]
elif module_def["type"] == "yolo":
x, layer_loss = module[0](x, targets, img_dim)
loss += layer_loss
yolo_outputs.append(x)
layer_outputs.append(x)
yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1))
return yolo_outputs if targets is None else (loss, yolo_outputs)
def load_darknet_weights(self, weights_path):
"""Parses and loads the weights stored in 'weights_path'"""
# Open the weights file
with open(weights_path, "rb") as f:
header = np.fromfile(f, dtype=np.int32, count=5) # First five are header values
self.header_info = header # Needed to write header when saving weights
self.seen = header[3] # number of images seen during training
weights = np.fromfile(f, dtype=np.float32) # The rest are weights
# Establish cutoff for loading backbone weights
cutoff = None
if "darknet53.conv.74" in weights_path:
cutoff = 75
ptr = 0
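        # `ptr` walks the flat weight array; for each convolutional block the BN
        # bias/weight/running stats (or the plain conv bias) are read first, then the
        # conv weights, matching the order Darknet uses when writing the file.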
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
if i == cutoff:
break
if module_def["type"] == "convolutional":
conv_layer = module[0]
if module_def["batch_normalize"]:
# Load BN bias, weights, running mean and running variance
bn_layer = module[1]
num_b = bn_layer.bias.numel() # Number of biases
# Bias
bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
else:
# Load conv. bias
num_b = conv_layer.bias.numel()
conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)
conv_layer.bias.data.copy_(conv_b)
ptr += num_b
# Load conv. weights
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
def save_darknet_weights(self, path, cutoff=-1):
"""
@:param path - path of the new weights file
@:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved)
"""
fp = open(path, "wb")
self.header_info[3] = self.seen
self.header_info.tofile(fp)
# Iterate through layers
for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if module_def["type"] == "convolutional":
conv_layer = module[0]
# If batch norm, load bn first
if module_def["batch_normalize"]:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(fp)
bn_layer.weight.data.cpu().numpy().tofile(fp)
bn_layer.running_mean.data.cpu().numpy().tofile(fp)
bn_layer.running_var.data.cpu().numpy().tofile(fp)
# Load conv bias
else:
conv_layer.bias.data.cpu().numpy().tofile(fp)
# Load conv weights
conv_layer.weight.data.cpu().numpy().tofile(fp)
fp.close()
def prepare_yolo(model_dir):
''' Download yolo model files and load the model weights
:param model_dir: Directory path where to store yolo model weights and yolo model configuration file.
:return: Yolo model after loading model weights
'''
cfg_file = os.path.join(model_dir, 'yolov3.cfg')
if not os.path.exists(cfg_file):
download_command = 'wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg -O ' + cfg_file
os.system(download_command)
weight_file = os.path.join(model_dir, 'yolov3.weights')
if not os.path.exists(weight_file):
download_command = 'wget https://pjreddie.com/media/files/yolov3.weights -O ' + weight_file
os.system(download_command)
yolo_model = Darknet(cfg_file, 416)
yolo_model.load_darknet_weights(weight_file)
    print('prepared yolo model')
return yolo_model
# if __name__ == '__main__':
# prepare_yolo(model_dir = '/home/face-r/Steps_face_recognition/emotic/debug/models')
|
[
"numpy.fromfile",
"torch.nn.ZeroPad2d",
"torch.nn.Sequential",
"torch.max",
"torch.exp",
"torch.min",
"torch.from_numpy",
"torch.nn.MSELoss",
"numpy.array",
"torch.sum",
"torch.nn.functional.interpolate",
"torch.arange",
"os.path.exists",
"torch.nn.BatchNorm2d",
"torch.nn.ModuleList",
"torch.nn.LeakyReLU",
"torch.cat",
"torch.clamp",
"torch.sigmoid",
"os.path.join",
"torch.stack",
"torch.nn.BCELoss",
"os.system"
] |
[((1247, 1270), 'torch.max', 'torch.max', (['b1_x1', 'b2_x1'], {}), '(b1_x1, b2_x1)\n', (1256, 1270), False, 'import torch\n'), ((1288, 1311), 'torch.max', 'torch.max', (['b1_y1', 'b2_y1'], {}), '(b1_y1, b2_y1)\n', (1297, 1311), False, 'import torch\n'), ((1329, 1352), 'torch.min', 'torch.min', (['b1_x2', 'b2_x2'], {}), '(b1_x2, b2_x2)\n', (1338, 1352), False, 'import torch\n'), ((1370, 1393), 'torch.min', 'torch.min', (['b1_y2', 'b2_y2'], {}), '(b1_y2, b2_y2)\n', (1379, 1393), False, 'import torch\n'), ((5542, 5557), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (5555, 5557), True, 'import torch.nn as nn\n'), ((17645, 17682), 'os.path.join', 'os.path.join', (['model_dir', '"""yolov3.cfg"""'], {}), "(model_dir, 'yolov3.cfg')\n", (17657, 17682), False, 'import os\n'), ((17878, 17919), 'os.path.join', 'os.path.join', (['model_dir', '"""yolov3.weights"""'], {}), "(model_dir, 'yolov3.weights')\n", (17890, 17919), False, 'import os\n'), ((1429, 1482), 'torch.clamp', 'torch.clamp', (['(inter_rect_x2 - inter_rect_x1 + 1)'], {'min': '(0)'}), '(inter_rect_x2 - inter_rect_x1 + 1, min=0)\n', (1440, 1482), False, 'import torch\n'), ((1485, 1538), 'torch.clamp', 'torch.clamp', (['(inter_rect_y2 - inter_rect_y1 + 1)'], {'min': '(0)'}), '(inter_rect_y2 - inter_rect_y1 + 1, min=0)\n', (1496, 1538), False, 'import torch\n'), ((5623, 5638), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (5636, 5638), True, 'import torch.nn as nn\n'), ((8171, 8235), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': 'self.scale_factor', 'mode': 'self.mode'}), '(x, scale_factor=self.scale_factor, mode=self.mode)\n', (8184, 8235), True, 'import torch.nn.functional as F\n'), ((8670, 8682), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (8680, 8682), True, 'import torch.nn as nn\n'), ((8701, 8713), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (8711, 8713), True, 'import torch.nn as nn\n'), ((10083, 10116), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 0]'], {}), '(prediction[..., 0])\n', (10096, 10116), False, 'import torch\n'), ((10135, 10168), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 1]'], {}), '(prediction[..., 1])\n', (10148, 10168), False, 'import torch\n'), ((10264, 10297), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 4]'], {}), '(prediction[..., 4])\n', (10277, 10297), False, 'import torch\n'), ((10319, 10353), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 5:]'], {}), '(prediction[..., 5:])\n', (10332, 10353), False, 'import torch\n'), ((13496, 13545), 'numpy.array', 'np.array', (['[0, 0, 0, self.seen, 0]'], {'dtype': 'np.int32'}), '([0, 0, 0, self.seen, 0], dtype=np.int32)\n', (13504, 13545), True, 'import numpy as np\n'), ((17691, 17715), 'os.path.exists', 'os.path.exists', (['cfg_file'], {}), '(cfg_file)\n', (17705, 17715), False, 'import os\n'), ((17835, 17862), 'os.system', 'os.system', (['download_command'], {}), '(download_command)\n', (17844, 17862), False, 'import os\n'), ((17928, 17955), 'os.path.exists', 'os.path.exists', (['weight_file'], {}), '(weight_file)\n', (17942, 17955), False, 'import os\n'), ((18053, 18080), 'os.system', 'os.system', (['download_command'], {}), '(download_command)\n', (18062, 18080), False, 'import os\n'), ((4181, 4204), 'torch.stack', 'torch.stack', (['keep_boxes'], {}), '(keep_boxes)\n', (4192, 4204), False, 'import torch\n'), ((10725, 10742), 'torch.exp', 'torch.exp', (['w.data'], {}), '(w.data)\n', (10734, 10742), False, 'import torch\n'), ((10782, 10799), 'torch.exp', 'torch.exp', 
(['h.data'], {}), '(h.data)\n', (10791, 10799), False, 'import torch\n'), ((14289, 14315), 'torch.cat', 'torch.cat', (['yolo_outputs', '(1)'], {}), '(yolo_outputs, 1)\n', (14298, 14315), False, 'import torch\n'), ((14571, 14610), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int32', 'count': '(5)'}), '(f, dtype=np.int32, count=5)\n', (14582, 14610), True, 'import numpy as np\n'), ((14797, 14829), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.float32'}), '(f, dtype=np.float32)\n', (14808, 14829), True, 'import numpy as np\n'), ((12285, 12317), 'torch.sum', 'torch.sum', (['(iou50 * detected_mask)'], {}), '(iou50 * detected_mask)\n', (12294, 12317), False, 'import torch\n'), ((12357, 12389), 'torch.sum', 'torch.sum', (['(iou50 * detected_mask)'], {}), '(iou50 * detected_mask)\n', (12366, 12389), False, 'import torch\n'), ((12431, 12463), 'torch.sum', 'torch.sum', (['(iou75 * detected_mask)'], {}), '(iou75 * detected_mask)\n', (12440, 12463), False, 'import torch\n'), ((6142, 6190), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['filters'], {'momentum': '(0.9)', 'eps': '(1e-05)'}), '(filters, momentum=0.9, eps=1e-05)\n', (6156, 6190), True, 'import torch.nn as nn\n'), ((6278, 6295), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (6290, 6295), True, 'import torch.nn as nn\n'), ((6510, 6536), 'torch.nn.ZeroPad2d', 'nn.ZeroPad2d', (['(0, 1, 0, 1)'], {}), '((0, 1, 0, 1))\n', (6522, 6536), True, 'import torch.nn as nn\n'), ((16285, 16327), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_w]'], {}), '(weights[ptr:ptr + num_w])\n', (16301, 16327), False, 'import torch\n'), ((15382, 15424), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (15398, 15424), False, 'import torch\n'), ((15530, 15572), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (15546, 15572), False, 'import torch\n'), ((15689, 15731), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (15705, 15731), False, 'import torch\n'), ((15860, 15902), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (15876, 15902), False, 'import torch\n'), ((16081, 16123), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (16097, 16123), False, 'import torch\n'), ((9113, 9128), 'torch.arange', 'torch.arange', (['g'], {}), '(g)\n', (9125, 9128), False, 'import torch\n'), ((9195, 9210), 'torch.arange', 'torch.arange', (['g'], {}), '(g)\n', (9207, 9210), False, 'import torch\n')]
|
import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k
from ltr.data import processing, sampler, LTRLoader
import ltr.models.bbreg.atom as atom_models
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm
def run(settings):
# Most common settings are assigned in the settings struct
settings.description = 'ATOM IoUNet with default settings, but additionally using GOT10k for training.'
settings.batch_size = 64
    settings.num_workers = 8
settings.print_interval = 1
settings.normalize_mean = [0.485, 0.456, 0.406]
settings.normalize_std = [0.229, 0.224, 0.225]
settings.search_area_factor = 5.0
settings.feature_sz = 18
settings.output_sz = settings.feature_sz * 16
settings.center_jitter_factor = {'train': 0, 'test': 4.5}
settings.scale_jitter_factor = {'train': 0, 'test': 0.5}
# Train datasets
lasot_train = Lasot(settings.env.lasot_dir, split='train')
got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
# coco_train = MSCOCOSeq(settings.env.coco_dir,version='2017')
# Validation datasets
got10k_val = Got10k(settings.env.got10k_dir, split='votval')
# The joint augmentation transform, that is applied to the pairs jointly
transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))
# The augmentation transform applied to the training set (individually to each image in the pair)
transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
# The augmentation transform applied to the validation set (individually to each image in the pair)
transform_val = tfm.Transform(tfm.ToTensor(),
tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
# Data processing to do on the training pairs
proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
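    # Each frame gets boxes_per_frame (16) jittered proposals whose IoU with the ground
    # truth stays above min_iou; the sigma factors set how strongly the proposals are
    # perturbed (behaviour assumed from the ATOMProcessing defaults).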
data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
output_sz=settings.output_sz,
center_jitter_factor=settings.center_jitter_factor,
scale_jitter_factor=settings.scale_jitter_factor,
mode='sequence',
proposal_params=proposal_params,
transform=transform_train,
joint_transform=transform_joint)
# Data processing to do on the validation pairs
data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
output_sz=settings.output_sz,
center_jitter_factor=settings.center_jitter_factor,
scale_jitter_factor=settings.scale_jitter_factor,
mode='sequence',
proposal_params=proposal_params,
transform=transform_val,
joint_transform=transform_joint)
# The sampler for training
dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train], [1,1,1],
samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train)
# dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],
# samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train)
# The loader for training
loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,
shuffle=True, drop_last=True, stack_dim=1)
# The sampler for validation
dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,
processing=data_processing_val)
dataset_val.datatype = 'val'
# The loader for validation
loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,
shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)
# Create network and actor
net = atom_models.atom_resnet18(backbone_pretrained=True)
objective = nn.MSELoss()
actor = actors.AtomActor(net=net, objective=objective)
# Optimizer
optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)  # every 15 epochs: lr = lr * gamma
# Create trainer
trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)
# Run training (set fail_safe=False if you are debugging)
trainer.train(50, load_latest=False, fail_safe=True)
# trainer.train(50, load_latest=True, fail_safe=False)
|
[
"ltr.dataset.Got10k",
"ltr.data.processing.ATOMProcessing",
"ltr.actors.AtomActor",
"ltr.trainers.LTRTrainer",
"ltr.dataset.Lasot",
"ltr.data.transforms.ToGrayscale",
"ltr.data.transforms.Normalize",
"torch.optim.lr_scheduler.StepLR",
"ltr.models.bbreg.atom.atom_resnet18",
"ltr.data.sampler.ATOMSampler",
"torch.nn.MSELoss",
"ltr.data.LTRLoader",
"ltr.data.transforms.ToTensor",
"ltr.data.transforms.ToTensorAndJitter"
] |
[((969, 1013), 'ltr.dataset.Lasot', 'Lasot', (['settings.env.lasot_dir'], {'split': '"""train"""'}), "(settings.env.lasot_dir, split='train')\n", (974, 1013), False, 'from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k\n'), ((1033, 1082), 'ltr.dataset.Got10k', 'Got10k', (['settings.env.got10k_dir'], {'split': '"""vottrain"""'}), "(settings.env.got10k_dir, split='vottrain')\n", (1039, 1082), False, 'from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k\n'), ((1284, 1331), 'ltr.dataset.Got10k', 'Got10k', (['settings.env.got10k_dir'], {'split': '"""votval"""'}), "(settings.env.got10k_dir, split='votval')\n", (1290, 1331), False, 'from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k\n'), ((2205, 2538), 'ltr.data.processing.ATOMProcessing', 'processing.ATOMProcessing', ([], {'search_area_factor': 'settings.search_area_factor', 'output_sz': 'settings.output_sz', 'center_jitter_factor': 'settings.center_jitter_factor', 'scale_jitter_factor': 'settings.scale_jitter_factor', 'mode': '"""sequence"""', 'proposal_params': 'proposal_params', 'transform': 'transform_train', 'joint_transform': 'transform_joint'}), "(search_area_factor=settings.search_area_factor,\n output_sz=settings.output_sz, center_jitter_factor=settings.\n center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor,\n mode='sequence', proposal_params=proposal_params, transform=\n transform_train, joint_transform=transform_joint)\n", (2230, 2538), False, 'from ltr.data import processing, sampler, LTRLoader\n'), ((2978, 3309), 'ltr.data.processing.ATOMProcessing', 'processing.ATOMProcessing', ([], {'search_area_factor': 'settings.search_area_factor', 'output_sz': 'settings.output_sz', 'center_jitter_factor': 'settings.center_jitter_factor', 'scale_jitter_factor': 'settings.scale_jitter_factor', 'mode': '"""sequence"""', 'proposal_params': 'proposal_params', 'transform': 'transform_val', 'joint_transform': 'transform_joint'}), "(search_area_factor=settings.search_area_factor,\n output_sz=settings.output_sz, center_jitter_factor=settings.\n center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor,\n mode='sequence', proposal_params=proposal_params, transform=\n transform_val, joint_transform=transform_joint)\n", (3003, 3309), False, 'from ltr.data import processing, sampler, LTRLoader\n'), ((3708, 3887), 'ltr.data.sampler.ATOMSampler', 'sampler.ATOMSampler', (['[lasot_train, got10k_train, trackingnet_train]', '[1, 1, 1]'], {'samples_per_epoch': '(1000 * settings.batch_size)', 'max_gap': '(50)', 'processing': 'data_processing_train'}), '([lasot_train, got10k_train, trackingnet_train], [1, 1, \n 1], samples_per_epoch=1000 * settings.batch_size, max_gap=50,\n processing=data_processing_train)\n', (3727, 3887), False, 'from ltr.data import processing, sampler, LTRLoader\n'), ((4198, 4365), 'ltr.data.LTRLoader', 'LTRLoader', (['"""train"""', 'dataset_train'], {'training': '(True)', 'batch_size': 'settings.batch_size', 'num_workers': 'settings.num_workers', 'shuffle': '(True)', 'drop_last': '(True)', 'stack_dim': '(1)'}), "('train', dataset_train, training=True, batch_size=settings.\n batch_size, num_workers=settings.num_workers, shuffle=True, drop_last=\n True, stack_dim=1)\n", (4207, 4365), False, 'from ltr.data import processing, sampler, LTRLoader\n'), ((4437, 4569), 'ltr.data.sampler.ATOMSampler', 'sampler.ATOMSampler', (['[got10k_val]', '[1]'], {'samples_per_epoch': '(500 * settings.batch_size)', 'max_gap': '(50)', 'processing': 'data_processing_val'}), '([got10k_val], [1], 
samples_per_epoch=500 * settings.\n batch_size, max_gap=50, processing=data_processing_val)\n', (4456, 4569), False, 'from ltr.data import processing, sampler, LTRLoader\n'), ((4684, 4867), 'ltr.data.LTRLoader', 'LTRLoader', (['"""val"""', 'dataset_val'], {'training': '(False)', 'batch_size': 'settings.batch_size', 'num_workers': 'settings.num_workers', 'shuffle': '(False)', 'drop_last': '(True)', 'epoch_interval': '(5)', 'stack_dim': '(1)'}), "('val', dataset_val, training=False, batch_size=settings.\n batch_size, num_workers=settings.num_workers, shuffle=False, drop_last=\n True, epoch_interval=5, stack_dim=1)\n", (4693, 4867), False, 'from ltr.data import processing, sampler, LTRLoader\n'), ((4927, 4978), 'ltr.models.bbreg.atom.atom_resnet18', 'atom_models.atom_resnet18', ([], {'backbone_pretrained': '(True)'}), '(backbone_pretrained=True)\n', (4952, 4978), True, 'import ltr.models.bbreg.atom as atom_models\n'), ((4995, 5007), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5005, 5007), True, 'import torch.nn as nn\n'), ((5020, 5066), 'ltr.actors.AtomActor', 'actors.AtomActor', ([], {'net': 'net', 'objective': 'objective'}), '(net=net, objective=objective)\n', (5036, 5066), False, 'from ltr import actors\n'), ((5176, 5237), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(15)', 'gamma': '(0.2)'}), '(optimizer, step_size=15, gamma=0.2)\n', (5201, 5237), True, 'import torch.optim as optim\n'), ((5297, 5382), 'ltr.trainers.LTRTrainer', 'LTRTrainer', (['actor', '[loader_train, loader_val]', 'optimizer', 'settings', 'lr_scheduler'], {}), '(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler\n )\n', (5307, 5382), False, 'from ltr.trainers import LTRTrainer\n'), ((1446, 1479), 'ltr.data.transforms.ToGrayscale', 'tfm.ToGrayscale', ([], {'probability': '(0.05)'}), '(probability=0.05)\n', (1461, 1479), True, 'import ltr.data.transforms as tfm\n'), ((1620, 1646), 'ltr.data.transforms.ToTensorAndJitter', 'tfm.ToTensorAndJitter', (['(0.2)'], {}), '(0.2)\n', (1641, 1646), True, 'import ltr.data.transforms as tfm\n'), ((1684, 1755), 'ltr.data.transforms.Normalize', 'tfm.Normalize', ([], {'mean': 'settings.normalize_mean', 'std': 'settings.normalize_std'}), '(mean=settings.normalize_mean, std=settings.normalize_std)\n', (1697, 1755), True, 'import ltr.data.transforms as tfm\n'), ((1896, 1910), 'ltr.data.transforms.ToTensor', 'tfm.ToTensor', ([], {}), '()\n', (1908, 1910), True, 'import ltr.data.transforms as tfm\n'), ((1946, 2017), 'ltr.data.transforms.Normalize', 'tfm.Normalize', ([], {'mean': 'settings.normalize_mean', 'std': 'settings.normalize_std'}), '(mean=settings.normalize_mean, std=settings.normalize_std)\n', (1959, 2017), True, 'import ltr.data.transforms as tfm\n')]
|
""" Activity list window """
import tkinter
import tkinter.ttk
from model import activity, invoice
from model.activity import Activity
from model.company import Company
from gui.activity import ActivityWindow
from gui.activity_split import ActivitySplit
from gui.invoice import InvoiceWindow
from gui.popup_file import popup_email
from gui.prime_singleton import PrimeSingleton
from util import activity_xlsx_report, backup, date_time
import config
class ActivityListWindow(tkinter.Toplevel):
""" Activity list window """
_BUTTON_WIDTH = 150
_WINDOW_WIDTH = 1200
_WINDOW_HEIGHT = 400
_Y_SPACING = 10
def __init__(self):
# Initialization
tkinter.Toplevel.__init__(self)
self.wm_geometry(str(self._WINDOW_WIDTH) + "x" + str(self._WINDOW_HEIGHT))
# Build tree
self._tree = tkinter.ttk.Treeview(self)
tree_height = self._WINDOW_HEIGHT - config.CONSTANTS["GUI_CELL_HEIGHT"] - self._Y_SPACING
self._tree.place(x=0, y=0, width=self._WINDOW_WIDTH, height=tree_height)
cell_y = tree_height + self._Y_SPACING
self._tree["columns"] = ("Client", "Project", "Location", "GUID")
self._tree.heading("Client", text="Client")
self._tree.heading("Project", text="Project")
self._tree.heading("Location", text="Location")
self._tree.heading("GUID", text="GUID")
# Fill tree with data
self._activities = []
self._tree_content = {}
self._fill_tree_with_activities()
# Buttons
cell_x = 0
edit_button = tkinter.Button(self, text="Edit", command=self._edit_click)
edit_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
edit_button = tkinter.Button(self, text="Excel", command=self._excel_click)
edit_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
split_button = tkinter.Button(self, text="Split", command=self._split_click)
split_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
invoice_button = tkinter.Button(self, text="Invoice", command=self._invoice_click)
invoice_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
invoice_button = tkinter.Button(self, text="Delete", command=self._delete_click)
invoice_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
@property
def _first_selected_activity(self) -> activity.Activity:
selected_activities = self._selected_activities
if len(selected_activities) == 0:
return None
return selected_activities[0]
@property
def _selected_activities(self) -> []:
selected_activities = []
for selected_id in self._tree.selection():
selected_activity = self._tree_content[selected_id]
selected_activities.append(selected_activity)
return selected_activities
def _delete_click(self):
deletable_activities = self._selected_activities
if len(deletable_activities) == 0:
return
deletable_guids = []
for act in deletable_activities:
deletable_guids.append(act.guid)
backup.execute()
Activity.delete_activities(deletable_guids)
self._fill_tree_with_activities()
PrimeSingleton.get().refresh()
def _edit_click(self):
first_selected_activity = self._first_selected_activity
if first_selected_activity is None:
return
activity_window = ActivityWindow()
activity_window.fill_with_activity(first_selected_activity)
        self.after(1, self.destroy)
activity_window.mainloop()
def _excel_click(self):
selected_activity_objects = self._selected_activities
xlsx_report = activity_xlsx_report.Report()
xlsx_report.generate_with_activity_objects(selected_activity_objects)
activity_company = Company(config.CONSTANTS["COMPANY_NAME_1E1"])
popup_email(recipients=activity_company.activity_emails,
subject="Bu ayki aktivitelerim",
attachment=xlsx_report.last_saved_files[0])
def _fill_tree_with_activities(self):
self._activities = Activity.get_activities()
self._activities["activities"] = sorted(
self._activities["activities"],
key=lambda x: x["date"],
reverse=False)
self._tree_content = {}
self._tree.delete(*self._tree.get_children())
for activity_line in self._activities["activities"]:
activity_obj = activity.Activity(activity_line)
project_obj = activity_obj.project
tree_val = (
project_obj.client.name,
project_obj.name,
activity_obj.location,
activity_obj.guid
)
id_in_tree = self._tree.insert(
'',
'end',
text=date_time.get_formatted_date(activity_obj.date),
value=tree_val
)
self._tree_content[id_in_tree] = activity_obj
self.update()
def _invoice_click(self):
selected_activities = self._selected_activities
if len(selected_activities) == 0:
return
new_invoice = invoice.get_invoice_obj_from_activities(selected_activities)
invoice_window = InvoiceWindow()
invoice_window.fill_with_invoice(new_invoice, browser=True, invoice_dir=True)
invoice_window.mainloop()
def _split_click(self):
first_selected_activity = self._first_selected_activity
if first_selected_activity is None:
return
activity_split = ActivitySplit()
activity_split.fill_with_activity(first_selected_activity)
        self.after(1, self.destroy)
activity_split.mainloop()
|
[
"model.company.Company",
"model.activity.Activity.delete_activities",
"gui.activity_split.ActivitySplit",
"model.activity.Activity",
"gui.invoice.InvoiceWindow",
"tkinter.Toplevel.__init__",
"tkinter.Button",
"gui.activity.ActivityWindow",
"model.activity.Activity.get_activities",
"model.invoice.get_invoice_obj_from_activities",
"util.date_time.get_formatted_date",
"util.backup.execute",
"gui.prime_singleton.PrimeSingleton.get",
"gui.popup_file.popup_email",
"tkinter.ttk.Treeview",
"util.activity_xlsx_report.Report"
] |
[((681, 712), 'tkinter.Toplevel.__init__', 'tkinter.Toplevel.__init__', (['self'], {}), '(self)\n', (706, 712), False, 'import tkinter\n'), ((839, 865), 'tkinter.ttk.Treeview', 'tkinter.ttk.Treeview', (['self'], {}), '(self)\n', (859, 865), False, 'import tkinter\n'), ((1573, 1632), 'tkinter.Button', 'tkinter.Button', (['self'], {'text': '"""Edit"""', 'command': 'self._edit_click'}), "(self, text='Edit', command=self._edit_click)\n", (1587, 1632), False, 'import tkinter\n'), ((1739, 1800), 'tkinter.Button', 'tkinter.Button', (['self'], {'text': '"""Excel"""', 'command': 'self._excel_click'}), "(self, text='Excel', command=self._excel_click)\n", (1753, 1800), False, 'import tkinter\n'), ((1908, 1969), 'tkinter.Button', 'tkinter.Button', (['self'], {'text': '"""Split"""', 'command': 'self._split_click'}), "(self, text='Split', command=self._split_click)\n", (1922, 1969), False, 'import tkinter\n'), ((2080, 2145), 'tkinter.Button', 'tkinter.Button', (['self'], {'text': '"""Invoice"""', 'command': 'self._invoice_click'}), "(self, text='Invoice', command=self._invoice_click)\n", (2094, 2145), False, 'import tkinter\n'), ((2258, 2321), 'tkinter.Button', 'tkinter.Button', (['self'], {'text': '"""Delete"""', 'command': 'self._delete_click'}), "(self, text='Delete', command=self._delete_click)\n", (2272, 2321), False, 'import tkinter\n'), ((3219, 3235), 'util.backup.execute', 'backup.execute', ([], {}), '()\n', (3233, 3235), False, 'from util import activity_xlsx_report, backup, date_time\n'), ((3244, 3287), 'model.activity.Activity.delete_activities', 'Activity.delete_activities', (['deletable_guids'], {}), '(deletable_guids)\n', (3270, 3287), False, 'from model.activity import Activity\n'), ((3552, 3568), 'gui.activity.ActivityWindow', 'ActivityWindow', ([], {}), '()\n', (3566, 3568), False, 'from gui.activity import ActivityWindow\n'), ((3823, 3852), 'util.activity_xlsx_report.Report', 'activity_xlsx_report.Report', ([], {}), '()\n', (3850, 3852), False, 'from util import activity_xlsx_report, backup, date_time\n'), ((3959, 4004), 'model.company.Company', 'Company', (["config.CONSTANTS['COMPANY_NAME_1E1']"], {}), "(config.CONSTANTS['COMPANY_NAME_1E1'])\n", (3966, 4004), False, 'from model.company import Company\n'), ((4014, 4152), 'gui.popup_file.popup_email', 'popup_email', ([], {'recipients': 'activity_company.activity_emails', 'subject': '"""Bu ayki aktivitelerim"""', 'attachment': 'xlsx_report.last_saved_files[0]'}), "(recipients=activity_company.activity_emails, subject=\n 'Bu ayki aktivitelerim', attachment=xlsx_report.last_saved_files[0])\n", (4025, 4152), False, 'from gui.popup_file import popup_email\n'), ((4258, 4283), 'model.activity.Activity.get_activities', 'Activity.get_activities', ([], {}), '()\n', (4281, 4283), False, 'from model.activity import Activity\n'), ((5341, 5401), 'model.invoice.get_invoice_obj_from_activities', 'invoice.get_invoice_obj_from_activities', (['selected_activities'], {}), '(selected_activities)\n', (5380, 5401), False, 'from model import activity, invoice\n'), ((5427, 5442), 'gui.invoice.InvoiceWindow', 'InvoiceWindow', ([], {}), '()\n', (5440, 5442), False, 'from gui.invoice import InvoiceWindow\n'), ((5744, 5759), 'gui.activity_split.ActivitySplit', 'ActivitySplit', ([], {}), '()\n', (5757, 5759), False, 'from gui.activity_split import ActivitySplit\n'), ((4619, 4651), 'model.activity.Activity', 'activity.Activity', (['activity_line'], {}), '(activity_line)\n', (4636, 4651), False, 'from model import activity, invoice\n'), ((3339, 3359), 
'gui.prime_singleton.PrimeSingleton.get', 'PrimeSingleton.get', ([], {}), '()\n', (3357, 3359), False, 'from gui.prime_singleton import PrimeSingleton\n'), ((4995, 5042), 'util.date_time.get_formatted_date', 'date_time.get_formatted_date', (['activity_obj.date'], {}), '(activity_obj.date)\n', (5023, 5042), False, 'from util import activity_xlsx_report, backup, date_time\n')]
|
import os
import shutil
import unittest
from base64 import b64encode
from sonLib.bioio import TestStatus
from sonLib.bioio import getTempFile
from sonLib.bioio import getTempDirectory
from sonLib.bioio import system
from toil.job import Job
from toil.common import Toil
from cactus.shared.common import cactus_call, ChildTreeJob
class TestCase(unittest.TestCase):
def setUp(self):
self.testNo = TestStatus.getTestSetup(1, 5, 10, 100)
self.tempDir = getTempDirectory(os.getcwd())
self.tempFiles = []
unittest.TestCase.setUp(self)
def tearDown(self):
unittest.TestCase.tearDown(self)
system("rm -rf %s" % self.tempDir)
@TestStatus.shortLength
def testCactusCall(self):
inputFile = getTempFile(rootDir=self.tempDir)
with open("/dev/urandom", "rb") as randText:
with open(inputFile, 'w') as fh:
fh.write(b64encode(randText.read(1024)).decode())
with open(inputFile) as fh:
input = "".join(fh.read().split("\n"))
#Send input to container's stdin through a file, get output
#from stdout
output = "".join(cactus_call(infile=inputFile, check_output=True,
parameters=["docker_test_script"]).split("\n"))
self.assertEqual(input, output)
#Send input as string, get output from stdout
output = "".join(cactus_call(stdin_string=input, check_output=True,
parameters=["docker_test_script"]).split("\n"))
self.assertEqual(input, output)
@TestStatus.shortLength
def testCactusCallPipes(self):
inputFile = getTempFile(rootDir=self.tempDir)
with open(inputFile, 'w') as f:
f.write('foobar\n')
# using 'cat' here rather than infile is intentional; it tests
# whether the directory is mounted into containers correctly.
output = cactus_call(parameters=[['cat', inputFile],
['sed', 's/foo/baz/g'],
['awk', '{ print "quux" $0 }']],
check_output=True)
self.assertEqual(output, 'quuxbazbar\n')
@TestStatus.mediumLength
def testChildTreeJob(self):
"""Check that the ChildTreeJob class runs all children."""
numChildren = 100
flagDir = getTempDirectory()
options = Job.Runner.getDefaultOptions(getTempDirectory())
shutil.rmtree(options.jobStore)
with Toil(options) as toil:
toil.start(CTTestParent(flagDir, numChildren))
# Check that all jobs ran
for i in range(numChildren):
self.assertTrue(os.path.exists(os.path.join(flagDir, str(i))))
shutil.rmtree(flagDir)
class CTTestParent(ChildTreeJob):
def __init__(self, flagDir, numChildren):
self.flagDir = flagDir
self.numChildren = numChildren
super(CTTestParent, self).__init__()
def run(self, fileStore):
for i in range(self.numChildren):
self.addChild(CTTestChild(self.flagDir, i))
class CTTestChild(Job):
def __init__(self, flagDir, index):
self.flagDir = flagDir
self.index = index
super(CTTestChild, self).__init__()
def run(self, fileStore):
# Mark that this job has run using a flag file
path = os.path.join(self.flagDir, str(self.index))
with open(path, 'w') as f:
# Empty file
f.write('')
if __name__ == '__main__':
unittest.main()
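# Editor's note (hedged, not part of the original module): these tests assume a
# working sonLib/toil/cactus installation plus a container image that provides the
# "docker_test_script" entry point used by cactus_call above. To run a single
# test case in isolation one would typically do something like:
#     python -m unittest <this_module>.TestCase.testCactusCallPipes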
|
[
"sonLib.bioio.getTempFile",
"sonLib.bioio.system",
"cactus.shared.common.cactus_call",
"sonLib.bioio.TestStatus.getTestSetup",
"shutil.rmtree",
"os.getcwd",
"toil.common.Toil",
"sonLib.bioio.getTempDirectory",
"unittest.TestCase.tearDown",
"unittest.main",
"unittest.TestCase.setUp"
] |
[((3535, 3550), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3548, 3550), False, 'import unittest\n'), ((409, 447), 'sonLib.bioio.TestStatus.getTestSetup', 'TestStatus.getTestSetup', (['(1)', '(5)', '(10)', '(100)'], {}), '(1, 5, 10, 100)\n', (432, 447), False, 'from sonLib.bioio import TestStatus\n'), ((537, 566), 'unittest.TestCase.setUp', 'unittest.TestCase.setUp', (['self'], {}), '(self)\n', (560, 566), False, 'import unittest\n'), ((600, 632), 'unittest.TestCase.tearDown', 'unittest.TestCase.tearDown', (['self'], {}), '(self)\n', (626, 632), False, 'import unittest\n'), ((641, 675), 'sonLib.bioio.system', 'system', (["('rm -rf %s' % self.tempDir)"], {}), "('rm -rf %s' % self.tempDir)\n", (647, 675), False, 'from sonLib.bioio import system\n'), ((755, 788), 'sonLib.bioio.getTempFile', 'getTempFile', ([], {'rootDir': 'self.tempDir'}), '(rootDir=self.tempDir)\n', (766, 788), False, 'from sonLib.bioio import getTempFile\n'), ((1664, 1697), 'sonLib.bioio.getTempFile', 'getTempFile', ([], {'rootDir': 'self.tempDir'}), '(rootDir=self.tempDir)\n', (1675, 1697), False, 'from sonLib.bioio import getTempFile\n'), ((1928, 2051), 'cactus.shared.common.cactus_call', 'cactus_call', ([], {'parameters': '[[\'cat\', inputFile], [\'sed\', \'s/foo/baz/g\'], [\'awk\', \'{ print "quux" $0 }\']]', 'check_output': '(True)'}), '(parameters=[[\'cat\', inputFile], [\'sed\', \'s/foo/baz/g\'], [\'awk\',\n \'{ print "quux" $0 }\']], check_output=True)\n', (1939, 2051), False, 'from cactus.shared.common import cactus_call, ChildTreeJob\n'), ((2381, 2399), 'sonLib.bioio.getTempDirectory', 'getTempDirectory', ([], {}), '()\n', (2397, 2399), False, 'from sonLib.bioio import getTempDirectory\n'), ((2476, 2507), 'shutil.rmtree', 'shutil.rmtree', (['options.jobStore'], {}), '(options.jobStore)\n', (2489, 2507), False, 'import shutil\n'), ((2759, 2781), 'shutil.rmtree', 'shutil.rmtree', (['flagDir'], {}), '(flagDir)\n', (2772, 2781), False, 'import shutil\n'), ((488, 499), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (497, 499), False, 'import os\n'), ((2448, 2466), 'sonLib.bioio.getTempDirectory', 'getTempDirectory', ([], {}), '()\n', (2464, 2466), False, 'from sonLib.bioio import getTempDirectory\n'), ((2522, 2535), 'toil.common.Toil', 'Toil', (['options'], {}), '(options)\n', (2526, 2535), False, 'from toil.common import Toil\n'), ((1156, 1244), 'cactus.shared.common.cactus_call', 'cactus_call', ([], {'infile': 'inputFile', 'check_output': '(True)', 'parameters': "['docker_test_script']"}), "(infile=inputFile, check_output=True, parameters=[\n 'docker_test_script'])\n", (1167, 1244), False, 'from cactus.shared.common import cactus_call, ChildTreeJob\n'), ((1411, 1501), 'cactus.shared.common.cactus_call', 'cactus_call', ([], {'stdin_string': 'input', 'check_output': '(True)', 'parameters': "['docker_test_script']"}), "(stdin_string=input, check_output=True, parameters=[\n 'docker_test_script'])\n", (1422, 1501), False, 'from cactus.shared.common import cactus_call, ChildTreeJob\n')]
|
"""
Example of usage of the AVB framework to infer a single exponential decay
model.
This uses the Python classes directly to infer the parameters for a single
instance of noisy data constructed as a Numpy array.
"""
import sys
import logging
import numpy as np
from vaby_avb import Avb
import vaby
# Uncomment line below to start the random number generator off with the same seed value
# each time, for repeatable results
#np.random.seed(0)
# Ground truth parameters
PARAMS_TRUTH = [42, 0.5]
NOISE_PREC_TRUTH = 0.1
NOISE_VAR_TRUTH = 1/NOISE_PREC_TRUTH
NOISE_STD_TRUTH = np.sqrt(NOISE_VAR_TRUTH)
print("Ground truth: a=%f, r=%f, noise=%f (precision)" % (PARAMS_TRUTH[0], PARAMS_TRUTH[1], NOISE_PREC_TRUTH))
# Create single exponential model
model = vaby.get_model_class("exp")(None)
# Observed data samples are generated by evaluating the ground-truth model
# and adding Gaussian noise with Numpy. Reducing the number of samples should
# make the inference less 'confident' - i.e. the output variances of the
# inferred model parameters will increase
N = 100
DT = 0.02
t = np.array([float(t)*DT for t in range(N)])
DATA_CLEAN = model.evaluate(PARAMS_TRUTH, t).numpy()
DATA_NOISY = DATA_CLEAN + np.random.normal(0, NOISE_STD_TRUTH, [N])
print("Time values:")
print(t)
print("Data samples (clean):")
print(DATA_CLEAN)
print("Data samples (noisy):")
print(DATA_NOISY)
# Run Fabber as a comparison if desired
#import os
#import nibabel as nib
#niidata = DATA_NOISY.reshape((1, 1, 1, N))
#nii = nib.Nifti1Image(niidata, np.identity(4))
#nii.to_filename("data_noisy.nii.gz")
#os.system("fabber_exp --data=data_noisy --print-free-energy --output=fabberout --dt=%.3f --model=exp --num-exps=1 --method=vb --noise=white --overwrite --debug" % DT)
# Log to stdout
logging.getLogger().setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(levelname)s : %(message)s'))
logging.getLogger().addHandler(handler)
# Run AVB inference
avb = Avb(t, vaby.DataModel(DATA_NOISY), model)
avb.run(method="leastsq", maxits=20, learning_rate=0.1, debug="--debug" in sys.argv)
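# Editor's sketch (hedged addition, not in the original example): a quick sanity
# check on the simulated data using only names defined above. The RMS difference
# between the noisy and clean samples should sit close to NOISE_STD_TRUTH, which
# makes a mis-specified noise level easy to spot.
rms_noise = np.sqrt(np.mean((DATA_NOISY - DATA_CLEAN) ** 2))
print("Empirical noise std: %.3f (ground truth %.3f)" % (rms_noise, NOISE_STD_TRUTH))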
|
[
"numpy.random.normal",
"logging.getLogger",
"logging.StreamHandler",
"numpy.sqrt",
"logging.Formatter",
"vaby.DataModel",
"vaby.get_model_class"
] |
[((577, 601), 'numpy.sqrt', 'np.sqrt', (['NOISE_VAR_TRUTH'], {}), '(NOISE_VAR_TRUTH)\n', (584, 601), True, 'import numpy as np\n'), ((1777, 1810), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1798, 1810), False, 'import logging\n'), ((755, 782), 'vaby.get_model_class', 'vaby.get_model_class', (['"""exp"""'], {}), "('exp')\n", (775, 782), False, 'import vaby\n'), ((1163, 1204), 'numpy.random.normal', 'np.random.normal', (['(0)', 'NOISE_STD_TRUTH', '[N]'], {}), '(0, NOISE_STD_TRUTH, [N])\n', (1179, 1204), True, 'import numpy as np\n'), ((1832, 1880), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s : %(message)s"""'], {}), "('%(levelname)s : %(message)s')\n", (1849, 1880), False, 'import logging\n'), ((1956, 1982), 'vaby.DataModel', 'vaby.DataModel', (['DATA_NOISY'], {}), '(DATA_NOISY)\n', (1970, 1982), False, 'import vaby\n'), ((1724, 1743), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1741, 1743), False, 'import logging\n'), ((1882, 1901), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1899, 1901), False, 'import logging\n')]
|
from dialog_api.users_pb2 import RequestLoadFullUsers, ResponseLoadFullUsers, FullUser
class Users:
def LoadFullUsers(self, request: RequestLoadFullUsers) -> ResponseLoadFullUsers:
return ResponseLoadFullUsers(full_users=[FullUser(id=1, contact_info=[], about=None)])
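# Minimal usage sketch (an editor's assumption, not from the original source):
# protobuf request messages can normally be constructed with default fields,
# so the stubbed servicer can be exercised directly like this.
if __name__ == "__main__":
    users = Users()
    response = users.LoadFullUsers(RequestLoadFullUsers())
    print(response.full_users[0].id)  # the stub above always returns id=1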
|
[
"dialog_api.users_pb2.FullUser"
] |
[((236, 279), 'dialog_api.users_pb2.FullUser', 'FullUser', ([], {'id': '(1)', 'contact_info': '[]', 'about': 'None'}), '(id=1, contact_info=[], about=None)\n', (244, 279), False, 'from dialog_api.users_pb2 import RequestLoadFullUsers, ResponseLoadFullUsers, FullUser\n')]
|