metadata (dict) | text (string, 60 to 3.49M characters)
---|---
{
"source": "jinbow/Octupus",
"score": 2
} |
#### File: Octupus/scripts/gen_simple_velocity.py
```python
import numpy as np
import scipy as sp
import pylab as plt
def gen_grid(nx, ny, nz):
    i_f = np.arange(nx)
    i_c = np.arange(nx) + 0.5
    j_f = np.arange(ny)
    j_c = np.arange(ny) + 0.5
    dxx = dyy = 2e3
    dx = np.ones((ny, nx)) * dxx
    dy = np.ones((ny, nx)) * dyy
    x_f = i_f * dxx
    x_c = i_c * dxx
    y_f = j_f * dyy
    y_c = j_c * dyy
    psi0 = 1e4
    x0 = nx / 2.0 * dxx
    y0 = ny / 2.0 * dyy
    r = dxx * 10
    xu, yu = np.meshgrid(x_f, y_c)
    xv, yv = np.meshgrid(x_c, y_f)
    # velocity of a Gaussian vortex centred at (x0, y0)
    u = psi0 / r**2 * (yu - y0) * np.exp((-(xu - x0)**2 - (yu - y0)**2) / r**2)
    v = -psi0 / r**2 * (xv - x0) * np.exp((-(xv - x0)**2 - (yv - y0)**2) / r**2)
    w = np.zeros((nz, ny, nx))
    u = u * np.ones_like(w)
    v = v * np.ones_like(w)
    hfacc = np.ones_like(w)
    drf = np.ones((nz)) * 10.0
    # write grid and velocity fields as big-endian float32 binaries
    hfacc.astype('>f4').tofile('hFacC.data')
    dx.astype('>f4').tofile('DXG.data')
    dy.astype('>f4').tofile('DYG.data')
    drf.astype('>f4').tofile('DRF.data')
    for i in range(20):
        app = '_%04i.data' % i
        u.astype('>f4').tofile('UVEL' + app)
        v.astype('>f4').tofile('VVEL' + app)
        w.astype('>f4').tofile('WVEL' + app)
    print(u.max())
    plt.quiver(u[0, ...], v[0, ...])
    plt.show()
    return

gen_grid(30, 30, 10)
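# Read-back sketch (my addition, not part of the original script): the files above
# are written as big-endian float32 ('>f4') with shape (nz, ny, nx) = (10, 30, 30),
# so they can be sanity-checked with numpy, e.g.:
# u_back = np.fromfile('UVEL_0000.data', dtype='>f4').reshape(10, 30, 30)
# print(u_back.max())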
```
#### File: Octupus/src/init_parti_xyz.py
```python
from numpy import *
import sys
#to run this script in commandline:
# python init_parti_xyz.py
def glider_target(npts=2):
    xyz = zeros((npts, 2))
    xyz[:, 0] = 400
    return

def case_test(npts=2):
    xyz = zeros((npts, 3))
    xyz[:, 0] = linspace(5, 6, npts)  # x index, npts points
    xyz[:, 1] = 15  # constant y
    xyz[:, 2] = 2   # vertical level; overwritten if target_density in the namelist is larger than 0
    xyz[:, 0] = 1000.0
    xyz[:, 1] = linspace(100, 1801, npts)
    xyz[:, 2] = 0
    # the saving sequence should be x[:], y[:], z[:], not [x1,y1,z1],[x2,y2,z2]...
    xyz.T.astype('>f8').tofile('particle_init.bin')
    return

if __name__ == '__main__':
    case_test(npts=20)
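# Read-back sketch (my addition): particle_init.bin holds big-endian float64 laid
# out as x[:], y[:], z[:], so it can be reloaded and reshaped to (3, npts):
# xyz_back = fromfile('particle_init.bin', dtype='>f8').reshape(3, -1)
# print(xyz_back.shape)  # (3, npts)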
``` |
{
"source": "jinbow/tutorials",
"score": 3
} |
#### File: meetings_workshops/20220330_ASAP_SWG/utils.py
```python
import requests
from pprint import pprint
CMR_OPS = 'https://cmr.earthdata.nasa.gov/search'
collection_url = 'https://cmr.earthdata.nasa.gov/search/collections'
var_url = "https://cmr.earthdata.nasa.gov/search/variables"
def find_dataset(provider='podaac',
keywords=['swot','level-2']):
"""
Find a list of collections/datasets that match all the keywords from the keywords list.
"""
import pandas as pd
if 'podaac' in provider.lower().replace('.',''):
provider='POCLOUD'
response = requests.get(collection_url,params={'cloud_hosted': 'True',
'has_granules': 'True',
'provider': provider,
'page_size':2000,},
headers={'Accept': 'application/json', } )
collections = response.json()['feed']['entry']
entries={}
entries['short_name']=[]
entries['long_name']=[]
entries['concept_id']=[]
for collection in collections:
title="%s %s %s"%(collection["short_name"],collection["dataset_id"][:97],collection["id"])
match=1
for kw in keywords:
match *= kw.lower() in title.lower()
if match==1:
entries['short_name'].append(collection["short_name"])
entries['concept_id'].append(collection["id"])
entries['long_name'].append(collection["dataset_id"])
return pd.DataFrame(entries)
def direct_s3(provider='podaac'):
import requests,s3fs
s3_cred_endpoint = {
'podaac':'https://archive.podaac.earthdata.nasa.gov/s3credentials',
'lpdaac':'https://data.lpdaac.earthdatacloud.nasa.gov/s3credentials'}
temp_creds_url = s3_cred_endpoint[provider]
creds = requests.get(temp_creds_url).json()
s3 = s3fs.S3FileSystem(anon=False,
key=creds['accessKeyId'],
secret=creds['secretAccessKey'],
token=creds['sessionToken'])
return s3
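# Example usage (my addition; assumes Earthdata login credentials are already
# configured, e.g. in ~/.netrc, before requesting temporary S3 credentials):
# df = find_dataset(provider='podaac', keywords=['swot', 'level-2'])
# print(df.head())
# s3 = direct_s3('podaac')
# print(s3.ls('podaac-ops-cumulus-protected')[:5])  # hypothetical bucket name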
``` |
{
"source": "jinb-park/find-linux-kernel-typo",
"score": 3
} |
#### File: jinb-park/find-linux-kernel-typo/gen_typo.py
```python
import operator
import os
import sys
import re
def build_dict(dict_file, dict_obj):
f = open(dict_file, 'r')
for word in f.readlines():
word = re.sub('[^a-z]+', '', word)
dict_obj[word] = 1
f.close()
def build_new_typo_dict(typo_file, dict_obj, new_typo_dict):
f = open(typo_file, 'r')
for line in f.readlines():
if line.startswith('#') == False:
            pair = re.split(r'\|\|', line)
pair[1] = re.sub('[^a-z]+', '', pair[1])
min_len = min(len(pair[0]), len(pair[1]))
idx = 0
ret_typo = ''
ret_correct = ''
if len(pair[0]) < len(pair[1]):
continue
for i in range(min_len):
if pair[0][i] != pair[1][i]:
idx = i
break
if len(pair[1][idx:]) < 4:
# correction
while True:
if idx <= 1:
break
idx = idx -1
substr = pair[1][idx:]
try:
value = dict_obj[substr]
ret_correct = substr
ret_typo = pair[0][idx:]
except KeyError:
continue
else:
ret_correct = pair[1][idx:]
ret_typo = pair[0][idx:]
if len(ret_correct) > 0 and len(ret_typo) > 0:
try:
value = dict_obj[ret_typo]
except KeyError:
new_typo_dict[ret_correct] = ret_typo
f.close()
def build_kernel_word(typo_file, kernel_word):
f = open(typo_file, 'r')
idx = 0
for line in f.readlines():
if line.startswith('#') == False:
            pair = re.split(r'\|\|', line)
pair[1] = re.sub('[^a-z]+', '', pair[1])
if len(pair[0]) > 0 and len(pair[1]) > 0:
kernel_word.append(pair[1])
idx = idx + 1
f.close()
def gen_new_typo_file(typo_file, kernel_word, new_typo_dict, ret_file):
tf = open(typo_file, 'r')
f = open(ret_file, 'w')
buf = tf.read()
for i in range(len(kernel_word)):
        for kn, vn in new_typo_dict.items():
if kn in kernel_word[i]:
tmpstr = str(kernel_word[i])
tmpstr = tmpstr.replace(kn, vn)
if tmpstr not in buf:
if tmpstr != 'enames' and tmpstr != 'eenabled': # filter false positive typo
f.write(tmpstr + '||' + kernel_word[i] + '\n')
f.close()
tf.close()
def gen_typo(dict_file, typo_file, ret_file):
dict_obj = dict()
new_typo_dict = dict()
kernel_word = []
build_dict(dict_file, dict_obj)
build_new_typo_dict(typo_file, dict_obj, new_typo_dict)
del dict_obj
build_kernel_word(typo_file, kernel_word)
gen_new_typo_file(typo_file, kernel_word, new_typo_dict, ret_file)
if __name__ == '__main__':
    if len(sys.argv) != 4:
        print('USAGE : gen_typo.py [dictionary file] [typo file] [result file]')
    else:
        gen_typo(sys.argv[1], sys.argv[2], sys.argv[3])
``` |
{
"source": "jinb-park/linux_kernel_rootkit_detector",
"score": 3
} |
#### File: jinb-park/linux_kernel_rootkit_detector/lkrd_tool.py
```python
import operator
import sys
import time
from numpy import *
from lkm_parser import *
from DataSet import DataSet
from NaiveBayes import NaiveBayes
def print_usage():
    print('USAGE : lkrd_tool.py <command> [input file] [db file] [label]')
    print('Example : ')
    print('    lkrd_tool.py train_list rootkit.list lkrd_db.dat 1')
    print('    lkrd_tool.py train rootkit.ko lkrd_db.dat 1')
    print('    lkrd_tool.py inspect rootkit.ko lkrd_db.dat')
    exit()
def get_data(fname, dbname):
data_str = ''
symbols = read_ksymbols(fname)
fr = open(dbname)
header = fr.readlines()[0]
header_list = header.split()
fr.close()
for i in range(len(header_list)):
if header_list[i] in symbols:
data_str += '1'
else:
data_str += '0'
data_str += ' '
return data_str
def train(fname, dbname, label):
data_str = get_data(fname, dbname)
dbfr = open(dbname, 'a')
dbfr.write(data_str)
dbfr.write(label + '\n')
dbfr.close()
def train_list(fname, dbname, label):
if label == '1':
all_symbols = set()
fr = open(fname)
for line in fr.readlines():
line = line.strip()
symbols = read_ksymbols(line)
all_symbols = all_symbols | symbols
fr.close()
all_symbols_list = sorted(all_symbols)
dbfr = open(dbname, 'w')
for i in range(len(all_symbols_list)):
dbfr.write(all_symbols_list[i])
if i == len(all_symbols_list) -1:
#dbfr.write(' ::label')
dbfr.write('\n')
else:
dbfr.write(' ')
dbfr.close()
fr = open(fname)
for line in fr.readlines():
line = line.strip()
train(line, dbname, label)
def inspect(fname, dbname):
classifier = NaiveBayes()
ds_builder = DataSet(classifier)
ds, labels, attributes = ds_builder.ReadDataSet(dbname)
#purified_ds = ds_builder.PurifyDataSet(ds)
trained_ds = classifier.TrainingDataSet(ds, labels, attributes)
#
data_str = get_data(fname, dbname)
data_set = data_str.split()
for i in range(len(data_set)):
data_set[i] = float(data_set[i])
result = classifier.InspectData(ds, trained_ds, data_set)
return result
def check_argv(sys_argv):
if len(sys_argv) < 2:
print_usage()
if sys_argv[1] == 'train_list':
if len(sys_argv) != 5:
print_usage()
train_list(sys_argv[2], sys_argv[3], sys_argv[4])
elif sys_argv[1] == 'train':
if len(sys_argv) != 5:
print_usage()
train(sys_argv[2], sys_argv[3], sys_argv[4])
elif sys_argv[1] == 'inspect':
if len(sys_argv) != 4:
print_usage()
result = inspect(sys_argv[2], sys_argv[3])
        if result == 1:
            print('"' + sys_argv[2] + '" is rootkit')
        else:
            print('"' + sys_argv[2] + '" is not rootkit')
else:
print_usage()
if __name__ == '__main__' :
check_argv(sys.argv)
exit()
```
#### File: jinb-park/linux_kernel_rootkit_detector/NaiveBayes.py
```python
import operator
from numpy import *
def ConvertDataToVec(data):
vec = [0]*len(data)*11 # [0] * num of attributes * (0.0 ~ 1.0 = 11)
attrIdx = 0
for d in data:
idx = int(attrIdx * 11) + int(d * 10)
if idx >= len(vec):
continue
vec[idx] = 1
attrIdx += 1
return vec
def GetMinMaxRanges(dataSet):
numpyArr = array(dataSet)
minValue = numpyArr.min(axis=0)
maxValue = numpyArr.max(axis=0)
ranges = maxValue - minValue
return minValue, maxValue, ranges
class NaiveBayes(object):
def TrainingDataSet(self, purifiedDataSet, labels, attributes):
notRichNum = ones(len(purifiedDataSet[0])*11)
richNum = ones(len(purifiedDataSet[0])*11)
notRichDenom = 2.0
richDenom = 2.0
notRichCount = 0
richCount = 0
idx = 0
for data in purifiedDataSet:
vec = ConvertDataToVec(data)
if int(labels[idx]) == 0:
notRichNum += vec
notRichDenom += sum(vec)
notRichCount += 1
else:
richNum += vec
richDenom += sum(vec)
richCount += 1
idx += 1
notRichVect = log(notRichNum / notRichDenom)
richVect = log(richNum / richDenom)
notRichPercent = float(notRichCount) / (notRichCount + richCount)
trainedDataSet = []
trainedDataSet.append(notRichVect)
trainedDataSet.append(richVect)
trainedDataSet.append(notRichPercent)
return trainedDataSet
def TestDataSet(self, origDataSet, trainedDataSet, testDataSet, trainedLabels, testLabels, attributes):
notRichVect, richVect, notRichPercent = trainedDataSet[0:3]
idx = 0
errorCount = 0
totalTest = len(testDataSet)
currentTest = 0
targetPercentage = 0.1
minValue, maxValue, ranges = GetMinMaxRanges(origDataSet)
for data in testDataSet:
# normalize
#for i in range(len(data)):
# data[i] = (data[i] - minValue[i]) / (ranges[i])
# data[i] = round(data[i], 1)
vec = ConvertDataToVec(data)
pNotRich = sum(vec * notRichVect) + log(notRichPercent)
pRich = sum(vec * richVect) + log(1.0 - notRichPercent)
if pNotRich > pRich:
if int(testLabels[idx]) != 0:
errorCount += 1
else:
if int(testLabels[idx]) != 1:
errorCount += 1
currentTest += 1
if (currentTest/float(totalTest)) >= targetPercentage:
                print('--- %d percent complete' % (targetPercentage * 100))
targetPercentage += 0.1
idx += 1
return errorCount
def InspectData(self, origDataSet, trainedDataSet, testData):
notRichVect, richVect, notRichPercent = trainedDataSet[0:3]
minValue, maxValue, ranges = GetMinMaxRanges(origDataSet)
vec = ConvertDataToVec(testData)
pNotRich = sum(vec * notRichVect) + log(notRichPercent)
pRich = sum(vec * richVect) + log(1.0 - notRichPercent)
if pNotRich > pRich:
return 0
else:
return 1
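# Worked example (my addition) of how ConvertDataToVec bins each attribute into
# 11 one-hot slots (values 0.0 to 1.0 in steps of 0.1):
# ConvertDataToVec([0.0, 0.5, 1.0]) returns a length-33 vector with ones at
# indices 0 (attr 0, value 0.0), 16 (attr 1, value 0.5) and 32 (attr 2, value 1.0).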
``` |
{
"source": "jincao2013/paperspider-manyusers",
"score": 2
} |
#### File: jincao2013/paperspider-manyusers/server.py
```python
__date__ = "Feb. 7, 2020"
import os
import sys
import time
from pytz import utc
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from paperspider.config import Config
from paperspider.spider import Arxiv, ApsPRL, ApsPRX, ApsPRB, ApsPRResearch
def test_job(job_id):
print('debug job pid={}'.format(os.getpid()))
with open('job.out', 'a') as f:
        f.write('{}: job_id={} running at pid={} \n'.format(time.asctime(), job_id, os.getpid()))
        # no explicit f.close() needed; the with block closes the file
def test_run(config=None):
scheduler = BackgroundScheduler()
scheduler.add_job(test_job, kwargs={'job_id': '1'}, trigger='interval', seconds=5, id='1')
print('[INFO] job 1 added')
scheduler.add_job(test_job, kwargs={'job_id': '2'}, trigger='interval', seconds=8, id='2')
print('[INFO] job 2 added')
scheduler.add_job(test_job, kwargs={'job_id': '3'}, trigger='interval', seconds=11, id='3')
print('[INFO] job 3 added')
scheduler.start()
print('[INFO] scheduler start')
try:
while True:
print('[INFO] scheduler is sleeping ...')
time.sleep(3600)
except (KeyboardInterrupt, SystemExit):
print('[INFO] remove all jobs ...')
scheduler.remove_all_jobs()
print('[INFO] scheduler shutdown ...')
scheduler.shutdown()
print('[INFO] Exit.')
def schedule(config):
arxiv = Arxiv(config)
prl = ApsPRL(config)
prx = ApsPRX(config)
prb = ApsPRB(config)
prresearch = ApsPRResearch(config)
'''
* scheduler
'''
executors = {
'default': ThreadPoolExecutor(10),
# 'processpool': ProcessPoolExecutor(5)
}
job_defaults = {
'coalesce': True,
# 'max_instances': 3,
}
    scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults, timezone=utc)
utc_hour = (6 - 8) % 24 # beijing_hour: 6
scheduler.add_job(arxiv.main, id='arxiv', name='arxiv.main', trigger='cron', day='*', hour=utc_hour, minute=0)
utc_hour = (7 - 8) % 24 # beijing_hour: 7
scheduler.add_job(prl.main, id='prl', name='prl.main', trigger='cron', day_of_week='mon,fri', hour=utc_hour, minute=0)
scheduler.add_job(prx.main, id='prx', name='prx.main', trigger='cron', month='*', day='20', hour=utc_hour, minute=15)
scheduler.add_job(prb.main, id='prb', name='prb.main', trigger='cron', day_of_week='sun', hour=utc_hour, minute=30)
scheduler.add_job(prresearch.main, id='prresearch', name='prresearch.main', trigger='cron', month='*', day='25', hour=utc_hour, minute=45)
# scheduler.add_job(arxiv.main, id='arxiv_test')
scheduler.start()
try:
while True:
time.sleep(3600)
except (KeyboardInterrupt, SystemExit):
scheduler.remove_all_jobs()
scheduler.shutdown()
def main():
usage = 'usage: python3 server.py /etc/paperspider/config.json'
try:
config_path = sys.argv[1]
except IndexError:
print(usage)
sys.exit(1)
config = Config(config_path)
schedule(config)
if __name__ == '__main__':
main()
'''
Debug
'''
# config_path = './test/config.test.json'
# config = Config(config_path)
# config = Config()
# arxiv = Arxiv(config)
# prl = Aps(config)
# arxiv.main()
# prl.main()
# c = config.c
# u = r'\xf6'
# c.execute("insert into papers (id, head_added_date, head_StrID, title) values (?,?,?,?)", (52, int(time.time()), u, " green's function"))
# config.conn.commit()
``` |
{
"source": "jin-cc/bastion-test",
"score": 2
} |
#### File: bastion/component/credential.py
```python
import json
import logging
from django.http import JsonResponse
from bastion.component.audit import OperationLog
from bastion.component.common import GetModelData, GetUserInfo
from bastion.forms import first_error_message
from bastion.forms.forms import CredentialGroupModelForm, CredentialModelForm, GroupCredentialModelForm, \
CommandGroupModelForm, GroupCommandModelForm, CommandModelForm
from bastion.models import CredentialGroupModel, CredentialModel, CredentialGroupRelationshipModel, CommandGroupModel, \
CommandGroupRelationshipModel, CommandModel, HostModel, HostCredentialRelationshipModel
from bastion.utils.decorator import sync_user_and_group
from bastion.utils.status_code import error, ErrorStatusCode, SuccessStatusCode, success
app_logging = logging.getLogger("app")
class CredentialGroup:
_get_model_data = GetModelData(CredentialGroupModel)
def get_credential_group(self, request):
kwargs = request.GET.dict()
status, message = self._get_credential_group(kwargs)
if not status:
app_logging.info(
'get_credential_group, parameter:{}, error info: {}'.format((json.dumps(kwargs)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
return JsonResponse(success(SuccessStatusCode.MESSAGE_GET_SUCCESS, message))
def _get_credential_group(self, kwargs):
id = kwargs.pop("id", None)
all_data = kwargs.pop("all_data", None)
if id:
query = CredentialGroupModel.fetch_one(id=id)
if not query:
return False, "数据不存在"
end_data = query.to_all_dict()
return True, end_data
        # return all data
        if all_data:
            return self._get_model_data.get_all_data(kwargs)
        # return paginated data
        return self._get_model_data.get_paging_data(kwargs)
def create_credential_group(self, request):
data = json.loads(request.body)
status, message = self._create_credential_group(request, data)
if not status:
app_logging.info(
'create_credential_group, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "新建", "凭据分组", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_CREATE_SUCCESS, message))
def _create_credential_group(self, request, data):
credential_ssh_list = data.get("credential_ssh_list", [])
credential_password_list = data.get("credential_password_list", [])
user_name_query = GetUserInfo().get_user_info(request)
if not user_name_query:
return False, "用户不存在"
form = CredentialGroupModelForm(data)
if not form.is_valid():
return False, first_error_message(form)
status, message = form.clean_name_unique()
if not status:
return False, message
form.cleaned_data.update({"user": user_name_query})
credential_group_query = CredentialGroupModel.create(**form.cleaned_data)
credential_list = list(set(credential_ssh_list + credential_password_list))
if credential_list:
GroupCredential()._create_group_credential(
{"credential_list": credential_list, "credential_group": credential_group_query.id})
return True, credential_group_query.to_dict()
def update_credential_group(self, request):
data = json.loads(request.body)
status, message = self._update_credential_group(data)
if not status:
app_logging.info(
'update_credential_group, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "修改", "凭据分组", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_UPDATE_SUCCESS, message))
def _update_credential_group(self, data):
id = data.get("id")
credential_ssh_list = data.get("credential_ssh_list", [])
credential_password_list = data.get("credential_password_list", [])
form = CredentialGroupModelForm(data)
if not form.is_valid():
return False, first_error_message(form)
credential_group_query = CredentialGroupModel.fetch_one(id=id)
if not credential_group_query:
return False, "分组不存在"
credential_group_query.update(**form.cleaned_data)
credential_list = list(set(credential_ssh_list + credential_password_list))
if credential_list:
GroupCredential()._create_group_credential(
{"credential_list": credential_list, "credential_group": credential_group_query.id})
self.update_host_credential_group_rel(credential_group_query)
return True, credential_group_query.to_dict()
def update_host_credential_group_rel(self, credential_group_query):
host_list = list(set([host_credential_rel_query.host for host_credential_rel_query in
credential_group_query.credential_group_host.get_queryset()]))
credential_list = list(set([credential_group_rel_query.credential for credential_group_rel_query in
credential_group_query.credential_group_queryset.get_queryset()]))
from bastion.component.resource import HostCredential
for host in host_list:
credential_group_rel_queryset = credential_group_query.credential_group_queryset.get_queryset()
for credential_group_rel_query in credential_group_rel_queryset:
HostCredential()._save_host_credential(
{"host": host.id, "credential": credential_group_rel_query.credential.id,
"credential_group": credential_group_query.id})
for host_credential_rel_query in host.host_credential_or_credential_group.get_queryset():
if host_credential_rel_query.credential and host_credential_rel_query.credential_group == credential_group_query:
if host_credential_rel_query.credential not in credential_list:
host_credential_rel_query.delete()
return True, ""
def delete_credential_group(self, request):
data = json.loads(request.body)
status, message = self._delete_credential_group(data)
if not status:
app_logging.info(
'delete_credential_group, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "删除", "凭据分组", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_DELETE_SUCCESS, message))
def _delete_credential_group(self, data):
id, id_list = data.get("id", None), data.get("id_list", None)
if id:
id_list = [id]
# id_list = data if isinstance(data, list) else [data]
for credential in id_list:
credential_group_query = CredentialGroupModel.fetch_one(id=credential)
if not credential_group_query:
if id:
return False, "凭据分组不存在"
# credential_query = CredentialGroupRelationshipModel.fetch_one(credential_group=credential_group_query)
# if credential_query:
# if id: return False, "分组下有关联凭据无法删除"
credential_group_query.delete()
return True, ""
class Credential:
_get_model_data = GetModelData(CredentialModel)
def get_credential(self, request):
kwargs = request.GET.dict()
        # all data
# credential_type = kwargs.get("credential_type")
# if not credential_type and not kwargs.get("id"):
# return JsonResponse(error(ErrorStatusCode.MUST_INPUT_MESSAGE))
status, message = self._get_credential(kwargs, request)
if not status:
app_logging.info('get_credential, parameter:{}, error info: {}'.format((json.dumps(kwargs)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
return JsonResponse(success(SuccessStatusCode.MESSAGE_GET_SUCCESS, message))
def _get_credential(self, kwargs, request=None):
id = kwargs.pop("id", None)
host_id = kwargs.pop("host_id", None)
user_query = GetUserInfo().get_user_info(request)
if not user_query:
return False, "用户不存在"
if user_query.role != 1:
return self._general_get_credential_data(kwargs, user_query, host_id)
if id:
return self._get_model_data.get_one_data(id)
if host_id:
            # administrators fetch all credentials of the current host
host_query = HostModel.fetch_one(id=host_id)
if not host_query:
return False, "主机不存在"
host_all_credential_queryset = host_query.get_all_credential_queryset()
return True, [i.to_base_dict() for i in host_all_credential_queryset]
all_data = kwargs.pop("all_data", None)
        # return all data
        if all_data:
            return self._get_model_data.get_all_data(kwargs)
        # return paginated data
        return self._get_model_data.get_paging_data(kwargs)
def _general_get_credential_data(self, kwargs, user_query, host_id):
all_data = kwargs.pop("all_data", None)
credential_queryset = user_query.get_user_credential_queryset()
credential_id_list = [credential.id for credential in credential_queryset]
if host_id:
real_credential_queryset = list()
host_query = HostModel.fetch_one(id=host_id)
if not host_query:
return False, "主机不存在"
host_all_credential_queryset = host_query.get_all_credential_queryset()
for credential_query in credential_queryset:
if credential_query in host_all_credential_queryset:
real_credential_queryset.append(credential_query)
return True, [i.to_base_dict() for i in real_credential_queryset]
kwargs["id__in"] = credential_id_list
search_type, search_data, _ = kwargs.pop("search_type", None), kwargs.pop("search_data", None), kwargs.pop(
"total", None)
if search_type and search_data:
kwargs[search_type + "__contains"] = search_data
try:
current, pageSize = int(kwargs.pop("current")), int(kwargs.pop("pageSize"))
except Exception:
current, pageSize = 1, 10
if all_data:
end_data = CredentialModel.fetch_all(**kwargs)
return True, [i.to_base_dict() for i in end_data]
current_page, total = CredentialModel.pagination(current, pageSize, **kwargs)
end_data = [i.to_base_dict() for i in current_page]
res_data = {
"current": current,
"pageSize": pageSize,
"total": total,
"data": end_data
}
return True, res_data
def create_credential(self, request):
data = json.loads(request.body)
status, message = self._create_credential(request, data)
if not status:
app_logging.info('create_credential, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "新建", "凭据", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_CREATE_SUCCESS, message))
def _create_credential(self, request, data):
"""
create credential
:param data:
{
"name": "凭据1",
"login_type": "auto",
"credential_type": "password",
"login_name": "root",
"login_password": "<PASSWORD>",
"credential_group": 1,
"description": "description"
}
:return:
{
"code": 200,
"successcode": 20007,
"message": "相关信息更新成功",
"data": {
"id": 1,
"name": "凭据1",
"login_type": "auto",
"credential_type": "password",
"login_name": "root",
"login_password": "123456",
"credential_group": 1,
"description": "description"
}
}
"""
user_name_query = GetUserInfo().get_user_info(request)
host_list = data.get("host_list", [])
if not user_name_query:
return False, "用户不存在"
form = CredentialModelForm(data)
if not form.is_valid():
return False, first_error_message(form)
status, message = form.clean_name_unique()
if not status:
return False, message
form.cleaned_data.update({"user": user_name_query})
credential_query = CredentialModel.create(**form.cleaned_data)
if host_list:
from bastion.component.resource import HostCredential
HostCredential()._create_host_credential({"credential": credential_query.id, "host_list": host_list})
return True, credential_query.to_dict()
def update_credential(self, request):
data = json.loads(request.body)
status, message = self._update_credential(data)
if not status:
app_logging.info('update_credential, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "修改", "凭据", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_UPDATE_SUCCESS, message))
def _update_credential(self, data):
"""
        Update a credential
:param data:
{
"id": 1,
"name": "凭据1",
"login_type": "auto",
"credential_type": "password",
"login_name": "root",
"login_password": "<PASSWORD>",
"credential_group": 1,
"description": "description"
}
:return:
{
"code": 200,
"successcode": 20007,
"message": "相关信息更新成功",
"data": {
"id": 1,
"name": "凭据1",
"login_type": "auto",
"credential_type": "password",
"login_name": "root",
"login_password": "<PASSWORD>",
"credential_group": 1,
"description": "description"
}
}
"""
id = data.get("id")
host_list = data.get("host_list", [])
form = CredentialModelForm(data)
if not form.is_valid():
return False, first_error_message(form)
cleaned_data = form.cleaned_data
credential_type = cleaned_data.pop("credential_type", None)
credential_query = CredentialModel.fetch_one(id=id, credential_type=credential_type)
if not credential_query:
return False, "凭据不存在"
credential_query.update(**cleaned_data)
if host_list:
from bastion.component.resource import HostCredential
HostCredential()._create_host_credential({"credential": credential_query.id, "host_list": host_list})
else:
HostCredentialRelationshipModel.objects.filter(credential=credential_query, credential_group=None).delete()
return True, credential_query.to_dict()
def delete_credential(self, request):
data = json.loads(request.body)
status, message = self._delete_credential(data)
if not status:
app_logging.info('delete_credential, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "删除", "凭据", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_DELETE_SUCCESS))
def _delete_credential(self, data):
"""
        Delete credentials
:param id: int, id_list list
:return:
"""
id, id_list = data.get("id", None), data.get("id_list", None)
if id:
id_list = [id]
for id in id_list:
credential_query = CredentialModel.fetch_one(id=id)
if not credential_query:
if id:
return False, "凭据不存在"
credential_query.delete()
return True, ""
class GroupCredential:
def get_group_credential(self, request):
data = request.GET.dict()
id = data.get("id")
relationship = CredentialGroupRelationshipModel.fetch_one(id=id)
if relationship:
return JsonResponse(success(SuccessStatusCode.MESSAGE_GET_SUCCESS, relationship.to_dict()))
app_logging.info(
'get_group_credential, parameter:{}, error info: {}'.format((json.dumps(data)), str("DATA_NOT_EXISTED")))
return JsonResponse(error(ErrorStatusCode.DATA_NOT_EXISTED))
def create_group_credential(self, request):
data = json.loads(request.body)
status, message = self._create_group_credential(data)
if not status:
app_logging.info(
'create_group_credential, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "新建", "凭据分组关联", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_CREATE_SUCCESS))
def _create_group_credential(self, data):
credential_group = data.get("credential_group")
credential_list = data.get("credential_list", [])
credential_group_query = CredentialGroupModel.fetch_one(id=credential_group)
if credential_group_query:
for credential_id in credential_list:
form = GroupCredentialModelForm({"credential": credential_id, "credential_group": credential_group})
if not form.is_valid():
continue
CredentialGroupRelationshipModel.create(**form.cleaned_data)
self._delete_old_data("credential_group", credential_group, "credential", credential_list)
return True, ""
return False, "凭据分组不存在"
def _delete_old_data(self, old_field, old_query_id, new_field, new_query_list):
        # delete objects in old_queryset that are not in new_query_list
old_dic = {old_field: old_query_id, new_field + "__isnull": False}
old_queryset = CredentialGroupRelationshipModel.fetch_all(**old_dic)
new_dic = {old_field: old_query_id, new_field + "__in": new_query_list}
new_queryset = CredentialGroupRelationshipModel.fetch_all(**new_dic)
try:
for old_relationship in old_queryset:
if old_relationship not in new_queryset:
old_relationship.delete()
return True, "success"
except Exception as e:
return False, str(e)
def delete_group_credential(self, request):
data = json.loads(request.body)
status, message = self._delete_group_credential(data)
if not status:
app_logging.info(
'delete_group_credential, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "删除", "凭据分组关联", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_DELETE_SUCCESS))
def _delete_group_credential(self, data):
credential = data.get("credential")
credential_group = data.get("credential_group")
relationship = CredentialGroupRelationshipModel.fetch_one(credential=credential,
credential_group=credential_group)
if relationship:
relationship.delete()
HostCredentialRelationshipModel.objects.filter(credential=credential,
credential_group=credential_group).delete()
return True, ""
return False, "数据不存在"
class CommandGroup:
_get_model_data = GetModelData(CommandGroupModel)
def get_command_group(self, request):
kwargs = request.GET.dict()
status, message = self._get_command_group(kwargs)
if not status:
app_logging.info('get_command_group, parameter:{}, error info: {}'.format((json.dumps(kwargs)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
return JsonResponse(success(SuccessStatusCode.MESSAGE_GET_SUCCESS, message))
def create_command_group(self, request):
data = json.loads(request.body)
status, message = self._create_command_group(request, data)
if not status:
app_logging.info('create_command_group, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "新建", "命令分组", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_CREATE_SUCCESS, message))
def update_command_group(self, request):
data = json.loads(request.body)
status, message = self._update_command_group(request, data)
if not status:
app_logging.info('update_command_group, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "修改", "命令分组", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_UPDATE_SUCCESS, message))
def delete_command_group(self, request):
data = json.loads(request.body)
status, message = self._delete_command_group(data)
if not status:
app_logging.info('delete_command_group, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "删除", "命令分组", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_DELETE_SUCCESS))
def _get_command_group(self, kwargs):
id = kwargs.pop("id", None)
all_data = kwargs.pop("all_data", None)
if id:
query = CommandGroupModel.fetch_one(id=id)
if not query:
return False, "数据不存在"
end_data = query.to_all_dict()
return True, end_data
        # return all data
        if all_data:
            return self._get_model_data.get_all_data(kwargs)
        # return paginated data
        return self._get_model_data.get_paging_data(kwargs)
def _create_command_group(self, request, data):
command_list = data.get("command_list", [])
user_name_query = GetUserInfo().get_user_info(request)
if not user_name_query:
return False, "用户不存在"
form = CommandGroupModelForm(data)
if not form.is_valid():
return False, first_error_message(form)
status, message = form.clean_name_unique()
if not status:
return False, message
form.cleaned_data.update({"user": user_name_query})
command_group_query = CommandGroupModel.create(**form.cleaned_data)
if command_list:
GroupCommand()._create_group_command_group(
{"command_list": command_list, "command_group": command_group_query.id})
return True, command_group_query.to_all_dict()
def _update_command_group(self, request, data):
id = data.get("id")
command_list = data.get("command_list", [])
command_group_query = CommandGroupModel.fetch_one(id=id)
form = CommandGroupModelForm(data)
if not form.is_valid():
return False, first_error_message(form)
command_group_query.update(**form.cleaned_data)
if command_list:
GroupCommand()._create_group_command_group(
{"command_group": command_group_query.id, "command_list": command_list})
return True, command_group_query.to_all_dict()
def _delete_command_group(self, data):
id, id_list = data.get("id", None), data.get("id_list", None)
if id:
id_list = [id]
for id in id_list:
credential_query = CommandGroupModel.fetch_one(id=id)
if not credential_query:
if id:
return False, "凭据不存在"
credential_query.delete()
return True, ""
class Command:
_get_model_data = GetModelData(CommandModel)
def get_command(self, request):
kwargs = request.GET.dict()
status, message = self._get_command(kwargs)
if not status:
app_logging.info('get_command, parameter:{}, error info: {}'.format((json.dumps(kwargs)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
return JsonResponse(success(SuccessStatusCode.MESSAGE_GET_SUCCESS, message))
def create_command(self, request):
data = json.loads(request.body)
status, message = self._create_command(request, data)
if not status:
app_logging.info('create_command, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "新建", "命令", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_CREATE_SUCCESS, message))
def update_command(self, request):
data = json.loads(request.body)
status, message = self._update_command(request, data)
if not status:
app_logging.info('update_command, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "修改", "命令", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_UPDATE_SUCCESS, message))
def delete_command(self, request):
data = json.loads(request.body)
status, message = self._delete_command(data)
if not status:
app_logging.info('delete_command, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "删除", "命令", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_DELETE_SUCCESS))
def _get_command(self, kwargs):
id = kwargs.pop("id", None)
all_data = kwargs.pop("all_data", None)
if id:
return self._get_model_data.get_one_data(id)
        # return all data
        if all_data:
            return self._get_model_data.get_all_data(kwargs)
        # return paginated data
        return self._get_model_data.get_paging_data(kwargs)
def _create_command(self, request, data):
user_name_query = GetUserInfo().get_user_info(request)
if not user_name_query:
return False, "用户不存在"
command_group_list = data.get("command_group_list")
form = CommandModelForm(data)
if not form.is_valid():
return False, first_error_message(form)
status, message = form.clean_command_unique()
if not status:
return False, message
form.cleaned_data.update({"user": user_name_query})
command_query = CommandModel.create(**form.cleaned_data)
if command_group_list:
GroupCommand()._create_group_command_group(
{"command": command_query.id, "command_group_list": command_group_list})
return True, command_query.to_dict()
def _update_command(self, request, data):
id = data.get("id")
command_group_list = data.get("command_group_list")
command_query = CommandModel.fetch_one(id=id)
form = CommandModelForm(data)
if not form.is_valid():
return False, first_error_message(form)
command_query.update(**form.cleaned_data)
if command_group_list:
GroupCommand()._create_group_command_group(
{"command": command_query.id, "command_group_list": command_group_list})
return True, command_query.to_dict()
def _delete_command(self, data):
id, id_list = data.get("id", None), data.get("id_list", None)
if id:
id_list = [id]
for id in id_list:
credential_query = CommandModel.fetch_one(id=id)
if not credential_query:
if id:
return False, "命令不存在"
credential_query.delete()
return True, ""
class GroupCommand:
def create_group_command_group(self, request):
data = json.loads(request.body)
status, message = self._create_group_command_group(data)
if not status:
app_logging.info(
'create_group_command_group, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "新建", "命令分组关联", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_GET_SUCCESS, message))
def _create_group_command_group(self, data):
command, command_group_list = data.get("command"), data.get("command_group_list", [])
if command and command_group_list:
for command_group in command_group_list:
self._save_group_command_group(command, command_group)
self._delete_old_data("command", command, "command_group", command_group_list)
command_group, command_list = data.get("command_group"), data.get("command_list", [])
if command_group and command_list:
for command in command_list:
self._save_group_command_group(command, command_group)
self._delete_old_data("command_group", command_group, "command", command_list)
return True, ""
def _save_group_command_group(self, command, command_group):
form = GroupCommandModelForm({"command": command, "command_group": command_group})
if form.is_valid():
CommandGroupRelationshipModel.create(**form.cleaned_data)
return True, ""
return False, first_error_message(form)
def _delete_old_data(self, old_field, old_query_id, new_field, new_query_list):
        # delete objects in old_queryset that are not in new_query_list
old_dic = {old_field: old_query_id, new_field + "__isnull": False}
old_queryset = CommandGroupRelationshipModel.fetch_all(**old_dic)
new_dic = {old_field: old_query_id, new_field + "__in": new_query_list}
new_queryset = CommandGroupRelationshipModel.fetch_all(**new_dic)
try:
for old_relationship in old_queryset:
if old_relationship not in new_queryset:
old_relationship.delete()
return True, "success"
except Exception as e:
return False, str(e)
def delete_group_command(self, request):
data = json.loads(request.body)
status, message = self._delete_command_group(data)
if not status:
app_logging.info(
'create_group_command_group, parameter:{}, error info: {}'.format((json.dumps(data)), str(message)))
return JsonResponse(error(ErrorStatusCode.INPUT_ERROR, custom_message=message))
OperationLog.request_log(request, "删除", "命令分组关联", "success")
return JsonResponse(success(SuccessStatusCode.MESSAGE_DELETE_SUCCESS))
def _delete_command_group(self, data):
command_group = data.get("command_group")
command = data.get("command")
command_group_query = CommandGroupRelationshipModel.fetch_one(command_group=command_group, command=command)
        if not command_group_query:
            return False, "数据不存在"
command_group_query.delete()
return True, ""
class SyncUserGroup:
def sync_user_group(self, request):
res = sync_user_and_group(request)
return JsonResponse(success(SuccessStatusCode.MESSAGE_GET_SUCCESS))
```
#### File: bastion/forms/network_proxy_form.py
```python
from django.forms import ModelForm
from bastion.models import NetworkProxyModel
from bastion.utils.constants import IP_PATTERN
from bastion.utils.encryption import PasswordEncryption
class NetworkProxyModelForm(ModelForm):
class Meta:
model = NetworkProxyModel
fields = "__all__"
exclude = ["user", "linux_login_password"]
error_messages = {
'name': {'required': "名称不能为空", "max_length": "登录名最大长度不能超过255个字符"},
'linux_ip': {"max_length": "登录名最大长度不能超过150个字符"},
'linux_port': {"max_length": "登录名最大长度不能超过22个字符"},
'linux_login_name': {"max_length": "登录名最大长度不能超过50个字符"},
# 'linux_login_password': {"max_length": "登录名最大长度不能超过500个字符"},
'windows_ip': {"max_length": "登录名最大长度不能超过150个字符"},
'windows_port': {"max_length": "登录名最大长度不能超过22个字符"},
'description': {"max_length": "登录名最大长度不能超过2000个字符"},
}
# def clean(self):
# linux_params_count, windows_params_count = 0, 0
# linux_ip = self.cleaned_data.get("linux_ip")
# linux_port = self.cleaned_data.get("linux_port")
# linux_login_name = self.cleaned_data.get("linux_login_name")
# linux_login_password = self.data.get("linux_login_password")
# windows_ip = self.data.get("windows_ip")
# windows_port = self.data.get("windows_port")
# if linux_ip: linux_params_count += 1
# if linux_port: linux_params_count += 1
# if linux_login_name: linux_params_count += 1
# if linux_login_password: linux_params_count += 1
# if windows_ip: windows_params_count += 1
# if windows_port: windows_params_count += 1
# if linux_params_count == 4:
# if self.check_Chinese(linux_login_name):
# self.add_error("linux_login_name", "资源账户不支持中文")
# if linux_login_password and linux_login_password != "******":
# if len(linux_login_password) > 200:
# self.add_error("name", "密码长度超限")
# self.cleaned_data["linux_login_password"] = self._encrypt_password(linux_login_password)
# elif linux_params_count == 3 and not linux_login_password:
# pass
# elif linux_params_count == 0:
# if windows_params_count == 0:
# self.add_error("name", "至少选择一种代理")
# else:
# self.add_error("linux_ip", "当选择Linux为代理时请完整输入Linux主机地址、端口、登录名和密码等信息")
# if windows_params_count == 1:
# self.add_error("windows_ip", "当选择Windows为代理时请完整输入Windows主机地址和端口等信息")
# return self.cleaned_data
def clean(self):
linux_ip = self.cleaned_data.get("linux_ip")
linux_port = self.cleaned_data.get("linux_port")
linux_login_name = self.cleaned_data.get("linux_login_name")
linux_login_password = self.data.get("linux_login_password")
windows_ip = self.data.get("windows_ip")
windows_port = self.data.get("windows_port")
if all([linux_ip, linux_port, linux_login_name, linux_login_password]):
if self.check_Chinese(linux_login_name):
self.add_error("linux_login_name", "用户名不支持中文")
if linux_login_password and linux_login_password != "******":
if len(linux_login_password) > 200:
self.add_error("name", "密码长度超限")
self.cleaned_data["linux_login_password"] = self._encrypt_password(linux_login_password)
elif not linux_ip and not linux_port and not linux_login_name and not linux_login_password:
pass
else:
self.add_error("linux_ip", "当选择Linux为代理时请完整输入Linux主机地址、端口、登录名和密码等信息")
if all([windows_ip, windows_port]):
pass
elif not windows_ip and not windows_port:
pass
else:
self.add_error("windows_ip", "当选择Windows为代理时请完整输入Windows主机地址和端口等信息")
if not all([linux_ip, linux_port, linux_login_name, linux_login_password]) and not all([windows_ip, windows_port]):
self.add_error("name", "至少选择一种代理")
return self.cleaned_data
def _encrypt_password(self, password):
return PasswordEncryption().encrypt(password)
def clean_linux_port(self):
linux_port = self.data.get("linux_port")
if linux_port:
if 1 <= int(linux_port) <= 65535:
return linux_port
self.add_error('linux_port', '请指定有效范围内的端口')
return linux_port
def clean_windows_port(self):
windows_port = self.data.get("windows_port")
if windows_port:
if 1 <= int(windows_port) <= 65535:
return windows_port
self.add_error('windows_port', '请指定有效范围内的端口')
return windows_port
def clean_linux_ip(self):
linux_ip = self.cleaned_data.get('linux_ip', "")
if linux_ip:
if not IP_PATTERN.match(linux_ip):
self.add_error('linux_ip', 'IP地址不合法')
return linux_ip
def clean_windows_ip(self):
windows_ip = self.cleaned_data.get('windows_ip', "")
if windows_ip:
if not IP_PATTERN.match(windows_ip):
self.add_error('windows_ip', 'IP地址不合法')
return windows_ip
def check_Chinese(self, word):
for ch in word:
if '\u4e00' <= ch <= '\u9fff':
return True
return False
```
#### File: bastion/migrations/0007_init_iam_action_v3.py
```python
from django.db import migrations
from bastion.utils.init_action_v3 import add_action_to_system
def run_init(apps, schema_editor):
add_action_to_system()
class Migration(migrations.Migration):
dependencies = [
('bastion', '0006_init_iam_action_v2'),
]
operations = [
migrations.RunPython(run_init)
]
```
#### File: bastion/resource/batch_views.py
```python
import json
from django.http import JsonResponse
from django.views import View
from bastion.component.batch_operation_component import CheckImport, ImportHostComponent
from bastion.utils.status_code import error, ErrorStatusCode
class BatchView(View):
def get(self, request):
return CheckImport().make_excel()
def post(self, request):
"""
        Excel import: check whether the data is usable and whether any fields are invalid
"""
try:
import_type = json.loads(request.body).get("import_type")
except Exception as e:
import_type = request.POST.get("import_type")
if import_type == "excel":
file = request.FILES.get("file")
return CheckImport().check_import(file, request)
elif import_type == "cmdb":
return ImportHostComponent().import_data(request)
else:
return JsonResponse(error(ErrorStatusCode.PARAMS_ERROR))
```
#### File: bastion/utils/create_iam.py
```python
import requests
def init_system_to_iam():
system_info = {
"id": "bastion",
"name": "堡垒机IAM测试",
"name_en": "bastion",
"description": "堡垒机IAM测试",
"description_en": "bastion iam test",
"clients": "bastion,",
"provider_config": {
"host": "http://bkdev-paas3.canway.net:8082/t/bastion/",
"auth": "basic",
"healthz": "/test/"
}
}
    # must be reachable on the internal network
IAM_HOST = "http://bkiam.service.consul:5001"
APP_CODE = "bastion"
SECRET_KEY = "<KEY>"
API = "/api/v1/model/systems/"
URL = IAM_HOST + API
# A = "http://paas.opsany.com/bk_iam"
# URL = A + API
headers = {
"X-Bk-App-Code": APP_CODE,
"X-Bk-App-Secret": SECRET_KEY,
"Content-Type": "application/json"
}
res = requests.post(URL, headers=headers, json=system_info)
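    # Sketch (my addition): a minimal check of the registration response; the call
    # above returns a requests.Response whose status and body can be inspected.
    # if res.status_code >= 400:
    #     print('IAM system registration failed:', res.status_code, res.text)
    # else:
    #     print(res.json())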
```
#### File: bastion/utils/init_command_script.py
```python
from bastion.models import CommandGroupModel, CommandModel, CommandGroupRelationshipModel
def init_command():
command_list = ["rm", "reboot", "shutdown", "init", "mkfs", "fdisk", "dd"]
command_group, _ = CommandGroupModel.objects.update_or_create(
name="危险命令",
defaults={
"description": "System default command group: Danger Command"
}
)
for command in command_list:
command_query, _ = CommandModel.objects.update_or_create(
command=command,
defaults={
"block_type": 1
}
)
CommandGroupRelationshipModel.objects.update_or_create(command=command_query, command_group=command_group)
```
#### File: components/bk_jwt/middlewares.py
```python
import logging
from django.conf import settings
from django.contrib import auth
from django.utils.deprecation import MiddlewareMixin
from blueapps.account.conf import ConfFixture
from blueapps.account.handlers.response import ResponseHandler
logger = logging.getLogger('component')
class BkJwtLoginRequiredMiddleware(MiddlewareMixin):
def process_view(self, request, view, args, kwargs):
"""
可通过登录认证的请求:
1. 带有BK JWT HEADER
2. JWT签名正确
"""
        # an upstream framework middleware has already filled the identified client info into request
        if not hasattr(request, 'is_bk_jwt') or not request.is_bk_jwt():
            return None
logger.debug('当前请求是否经过JWT转发')
login_exempt = getattr(view, 'login_exempt', False)
        # every request needs to be verified
        if not (login_exempt or request.user.is_authenticated):
            user = auth.authenticate(request=request)
            if user:
                # login succeeded; log the user in and continue
                auth.login(request, user)
if request.user.is_authenticated:
return None
handler = ResponseHandler(ConfFixture, settings)
return handler.build_bk_jwt_401_response(request)
return None
def process_response(self, request, response):
return response
```
#### File: blueapps/conf/__init__.py
```python
class BlueSettings(object):
def __init__(self):
from django.conf import settings as django_settings
from blueapps.conf import default_settings
self._django_settings = django_settings
self._default_settings = default_settings
def __getattr__(self, key):
if key == key.upper():
if hasattr(self._django_settings, key):
return getattr(self._django_settings, key)
elif hasattr(self._default_settings, key):
return getattr(self._default_settings, key)
raise AttributeError("%r object has no attribute %r"
% (self.__class__.__name__, key))
settings = BlueSettings()
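# Example (my addition): uppercase attributes resolve from Django settings first,
# then fall back to blueapps' default_settings; anything else raises AttributeError.
# from blueapps.conf import settings
# print(settings.APP_CODE)  # assumes APP_CODE is defined in one of the two sources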
```
#### File: core/exceptions/middleware.py
```python
import json
import logging
import traceback
from django.conf import settings
from django.http import JsonResponse, Http404
from django.utils.deprecation import MiddlewareMixin
from blueapps.core.exceptions.base import BlueException
try:
from raven.contrib.django.raven_compat.models import \
sentry_exception_handler
    # fall back gracefully when sentry is not installed
except ImportError:
sentry_exception_handler = None
logger = logging.getLogger('blueapps')
class AppExceptionMiddleware(MiddlewareMixin):
def process_exception(self, request, exception):
"""
        Unified handling of backend exceptions raised by the app
"""
self.exception = exception
self.request = request
        # exceptions the application raised deliberately (known to the developer)
if isinstance(exception, BlueException):
logger.log(
exception.LOG_LEVEL,
(u"""捕获主动抛出异常, 具体异常堆栈->[%s] status_code->[%s] & """
u"""client_message->[%s] & args->[%s] """) % (
traceback.format_exc(),
exception.ERROR_CODE,
exception.message,
exception.args
)
)
response = JsonResponse(exception.response_data())
response.status_code = exception.STATUS_CODE
return response
        # exceptions the application did not catch itself
logger.error(
(u"""捕获未处理异常,异常具体堆栈->[%s], 请求URL->[%s], """
u"""请求方法->[%s] 请求参数->[%s]""") % (
traceback.format_exc(),
request.path,
request.method,
json.dumps(getattr(request, request.method, None))
)
)
        # call every check* method in turn; if any returns True, do not mask the exception
        check_functions = self.get_check_functions()
        for check_function in check_functions:
            if check_function():
                return None
response = JsonResponse({
"result": False,
'code': "50000",
'message': u"系统异常,请联系管理员处理",
'data': None
})
response.status_code = 500
# notify sentry
if sentry_exception_handler is not None:
sentry_exception_handler(request=request)
return response
def get_check_functions(self):
"""获取需要判断的函数列表"""
return [getattr(self, func) for func in dir(self) if func.startswith('check') and callable(getattr(self, func))]
    def check_is_debug(self):
        """Return True when running in DEBUG (development) mode"""
        return settings.DEBUG
    def check_is_http404(self):
        """Return True when the exception is based on Http404"""
        return isinstance(self.exception, Http404)
```
#### File: middleware/xss/decorators.py
```python
from django.utils.decorators import available_attrs
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
# ===============================================================================
# Escaping decorators
# ===============================================================================
def escape_exempt(view_func):
"""
    Escape exemption: actions decorated with this skip the escaping middleware
"""
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.escape_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def escape_script(view_func):
"""
    Actions decorated with this have their GET and POST parameters JavaScript-escaped
"""
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.escape_script = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def escape_url(view_func):
"""
    Actions decorated with this have their GET and POST parameters URL-escaped
"""
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.escape_url = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def escape_exempt_param(*param_list, **param_list_dict):
"""
    This decorator exempts specific parameters of a view function from escaping
    @param param_list: list of parameter names
@return:
"""
def _escape_exempt_param(view_func):
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
if param_list_dict.get('param_list'):
wrapped_view.escape_exempt_param = param_list_dict['param_list']
else:
wrapped_view.escape_exempt_param = list(param_list)
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
return _escape_exempt_param
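# Usage sketch (my addition; 'content' is a hypothetical parameter name):
# @escape_exempt_param('content')
# def my_view(request):
#     ...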
```
#### File: blueapps/patch/log.py
```python
import os
from blueapps.conf.default_settings import BASE_DIR, APP_CODE
def get_paas_v2_logging_config_dict(is_local, bk_log_dir, log_level):
"""
    Logging settings for the external PaaS V2 edition
"""
app_code = os.environ.get('APP_ID', APP_CODE)
    # set the log directory path
if is_local:
log_dir = os.path.join(os.path.dirname(BASE_DIR), 'logs', app_code)
else:
log_dir = os.path.join(os.path.join(bk_log_dir, app_code))
    # create the log directory if it does not exist; existing log files are reused
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)s %(message)s \n',
},
'verbose': {
'format': '%(levelname)s [%(asctime)s] %(pathname)s '
'%(lineno)d %(funcName)s %(process)d %(thread)d '
'\n \t %(message)s \n',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'component': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': os.path.join(log_dir, 'component.log'),
'maxBytes': 1024 * 1024 * 10,
'backupCount': 5
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'root': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': os.path.join(log_dir, '%s.log' % app_code),
'maxBytes': 1024 * 1024 * 10,
'backupCount': 5
},
'wb_mysql': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': os.path.join(log_dir, 'wb_mysql.log'),
'maxBytes': 1024 * 1024 * 4,
'backupCount': 5
},
},
'loggers': {
            # loggers used by the legacy V2 development framework
'component': {
'handlers': ['component'],
'level': 'WARNING',
'propagate': True,
},
'django': {
'handlers': ['null'],
'level': 'INFO',
'propagate': True,
},
'django.server': {
'handlers': ['console'],
'level': log_level,
'propagate': True,
},
'django.request': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': True,
},
'django.db.backends': {
'handlers': ['wb_mysql'],
'level': log_level,
'propagate': True,
},
'root': {
'handlers': ['root'],
'level': log_level,
'propagate': True,
},
            # loggers used by the new V3 framework
'celery': {
'handlers': ['root'],
'level': log_level,
'propagate': True,
},
'blueapps': {
'handlers': ['root'],
'level': log_level,
'propagate': True,
},
'app': {
'handlers': ['root'],
'level': log_level,
'propagate': True,
}
}
}
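# Usage sketch (my addition; the paths and level below are examples, not from the original):
# LOGGING = get_paas_v2_logging_config_dict(is_local=True, bk_log_dir='/data/logs', log_level='INFO')
# logging.config.dictConfig(LOGGING)  # or assign to Django's LOGGING setting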
```
#### File: blueking/tests/test_utils.py
```python
from django.test import TestCase
from blueking.component.utils import get_signature
class TestUtils(TestCase):
def test_get_signature(self):
params = {
'method': 'GET',
'path': '/blueking/component/',
'app_secret': 'test',
'params': {'p1': 1, 'p2': 'abc'},
}
signature = get_signature(**params)
self.assertEqual(signature, 'S73XVZx3HvPRcak1z3k7jUkA7FM=')
params = {
'method': 'POST',
'path': '/blueking/component/',
'app_secret': 'test',
'data': {'p1': 1, 'p2': 'abc'},
}
        # the data dict may be serialized in either key order, so both signatures are accepted
signature = get_signature(**params)
self.assertIn(signature, ['qTzporCDYXqaWKuk/MNUXPT3A5U=', 'PnmqLk/8PVpsLHDFkolCQoi5lmg='])
```
#### File: bastion-test/index/views.py
```python
from django.shortcuts import render
# Create your views here.
from blueapps.account.decorators import login_exempt_v2
@login_exempt_v2
def vue(request):
return render(request, 'index.html')
# SITE_URL = "http://127.0.0.1:8000"
# return render(request, 'index.html', {"SITE_URL": SITE_URL})
``` |
{
"source": "jinchao-chen/deep-learning",
"score": 2
} |
#### File: unet/src/predict_script.py
```python
import pathlib
import numpy as np
import torch
from skimage.io import imread
from skimage.transform import resize
from matplotlib import pyplot as plt
from src.inference import predict
from src.transformations import normalize_01, re_normalize, create_dense_target
from src.unet import UNet
from src.utilities import get_filenames_of_path, Metrics
# preprocess function
def preprocess(img: np.ndarray):
img = np.moveaxis(img, -1, 0) # from [H, W, C] to [C, H, W]
img = normalize_01(img) # linear scaling to range [0-1]
img = np.expand_dims(img, axis=0) # add batch dimension [B, C, H, W]
img = img.astype(np.float32) # typecasting to float32
return img
# postprocess function
def postprocess(img: torch.tensor):
img = torch.topk(img, 1, dim=1)[1] # perform argmax to generate 1 channel
img = img.cpu().numpy() # send to cpu and transform to numpy.ndarray
img = np.squeeze(img) # remove batch dim and channel dim -> [H, W]
# img = re_normalize(img) # scale it to the range [0-255]
return img
def predict(img,
model,
preprocess,
postprocess,
device,
):
model.eval()
img = preprocess(img) # preprocess image
x = torch.from_numpy(img).to(device) # to torch, send to device
with torch.no_grad():
out = model(x) # send through model/network
# out_softmax = torch.softmax(out, dim=1) # perform softmax on outputs
# result = postprocess(out_softmax) # postprocess outputs
result = postprocess(out)
return result
class Predictions:
"""genralize the transformation functions to make it suitable for the other models"""
def __init__(self, images_names, targets_names, model_path):
self.images_names = images_names
self.targets_names = targets_names
self.model_path = model_path
@property
def images(self):
return [imread(img_name) for img_name in self.images_names]
@property
def targets(self):
return [create_dense_target(imread(tar_name)) for tar_name in self.targets_names]
# def transform(self, img_w):
# self.images_res = [resize(img, (img_w, img_w, 3))
# for img in self.images] # reshaped image
# resize_kwargs = {'order': 0,
# 'anti_aliasing': False, 'preserve_range': True}
# self.targets_res = [resize(tar, (img_w, img_w), **resize_kwargs)
# for tar in self.targets] # reshaped targets
def evaluate(self, model, return_all=True):
model_weights = torch.load(self.model_path)
model.load_state_dict(model_weights)
# device
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
# print("The divice running is: ", device)
self.output = [predict(img, model, preprocess, postprocess, device)
for img in self.images]
met = Metrics(self.output, self.targets, return_all=return_all)
self._iou = met.iou_score
self._dice = met.dice_score
if not return_all:
print(f'iou scores: {self.score_str(self.iou)}')
print(f'dice scores: {self.score_str(self.dice)}')
else:
max_dice = np.max(self.dice, axis=0)
print(f'best score for each class: {self.score_str(max_dice)}')
@property
def iou(self):
return self._iou
@property
def dice(self):
return self._dice
@staticmethod
def predict(img,
model,
preprocess,
postprocess,
device,
):
model.eval()
img = preprocess(img) # preprocess image
x = torch.from_numpy(img).to(device) # to torch, send to device
with torch.no_grad():
out = model(x) # send through model/network
# out_softmax = torch.softmax(out, dim=1) # perform softmax on outputs
# result = postprocess(out_softmax) # postprocess outputs
result = postprocess(out)
return result
@staticmethod
def score_str(scores):
return ' | '.join(['{:.2f}'.format(x) for x in scores])
def plot(self, idxes):
for idx in idxes:
fig, axes = plt.subplots(1, 3, figsize=(16, 8))
axes[0].imshow(self.images[idx])
axes[1].imshow(self.output[idx],cmap=plt.cm.gray)
axes[2].imshow(self.targets[idx], cmap=plt.cm.gray)
axes[2].set_title(self.images_names[idx].stem)
if __name__ == '__main__':
import napari
# root directory
root = pathlib.Path.cwd() / 'data_semantic_segmentation_baseline'
# input and target files
images_names = get_filenames_of_path(root / 'imgs' / 'validation')
targets_names = get_filenames_of_path(root / 'masks' / 'validation')
# read images and store them in memory
images = [imread(img_name) for img_name in images_names]
targets = [imread(tar_name) for tar_name in targets_names]
# Resize images and targets
images_res = [resize(img, (256, 256, 3)) for img in images]
resize_kwargs = {'order': 0,
'anti_aliasing': False, 'preserve_range': True}
targets_res = [resize(tar, (256, 256), **resize_kwargs) for tar in targets]
# device
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
# model
model = UNet(in_channels=3,
out_channels=3,
n_blocks=4,
start_filters=32,
activation='relu',
normalization='batch',
conv_mode='same',
dim=2).to(device)
model_name = 'seg_unet_model.pt'
model_weights = torch.load(pathlib.Path.cwd() / model_name)
model.load_state_dict(model_weights)
# predict the segmentation maps
output = [predict(img, model, preprocess, postprocess, device)
for img in images_res]
# view predictions with napari
# the t key for next does not work yet as expected
with napari.gui_qt():
viewer = napari.Viewer()
idx = 1
img_nap = viewer.add_image(images_res[idx], name='Input')
tar_nap = viewer.add_labels(targets_res[idx], name='Target')
out_nap = viewer.add_labels(output[idx], name='Prediction')
@viewer.bind_key('t')
def next_batch_training(viewer):
# idx = idx + 1
img_nap.data = images_res[3]
tar_nap.data = targets_res[3]
out_nap.data = output[3]
img_nap.name = 'Input'
tar_nap.name = 'Target'
out_nap.name = 'Prediction'
``` |
{
"source": "jincheng95/charges-tools",
"score": 3
} |
#### File: charges-tools/chargetools/shortcuts.py
```python
import glob
import os
from chargetools.exceptions import InputError
def parse_directory_for_charge(directory_path, extensions=None, base_molecule=None, **kwargs):
"""
Parse all charge assignment files within a directory into a list of :class:`entities.MoleculeWithCharge` objects.
:param directory_path: Path of directory to be searched.
:param extensions: Extensions of files to be parsed.
:param base_molecule: Base molecule for construction of all molecules.
:param kwargs: Extra keyword arguments for the instantiation method of the charge objects.
:return: A list of :class:`entities.MoleculeWithCharge` objects, parsed from files.
"""
valid_files = []
# For each possible extension, produce glob matching string, e.g. /path/*.log, /path/*.txt, etc.
if extensions is not None:
for extension in extensions:
extension = extension.lower()
if extension[0] != ".":
extension = "." + extension
query = os.path.join(directory_path, '*' + extension)
# the glob module matches * with any wild cards
# keep a list of all file paths that need parsing
valid_files += glob.glob(query)
else:
valid_files = glob.glob(directory_path + '*')
from chargetools import entities
if base_molecule is None:
ac_query = os.path.join(directory_path, '*.ac')
        ac_files = glob.glob(ac_query)
        if len(ac_files) > 1:
            raise InputError('Multiple .ac files found within directory. '
                             'Please specify base molecule, or delete duplicate .ac files from directory.')
        base_molecule = entities.Molecule.from_ac_file(ac_files[0], **kwargs)
return [entities.MoleculeWithCharge.from_file(valid_file, base_molecule) for valid_file in valid_files]
``` |
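A hedged usage sketch for `parse_directory_for_charge`; the directory path and extensions are illustrative only and assume the `chargetools` package is importable.
```python
# Usage sketch (illustrative directory and extensions).
from chargetools.shortcuts import parse_directory_for_charge

# Parse every .log and .txt charge assignment in the directory; the single .ac
# file found there is used to build the base molecule.
charged_molecules = parse_directory_for_charge('calculations/', extensions=['log', 'txt'])
print('%d charge assignments parsed' % len(charged_molecules))
```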
{
"source": "JinchengHeRyan/SMIIP_Kaldi_Projects",
"score": 4
} |
#### File: SMIIP_1ST_HW_tidigits/local/data_preparation.py
```python
def create_text_file():
f = open('wav.scp', 'r')
n = open('text', 'w')
context = f.readlines()
for line in context:
file_name_full = line.split()[0]
file_name = line.split()[0].split('_')[1]
ans = ""
for char in file_name:
if char == 'A' or char == 'B':
continue
ans += word_dict()[char] + ' '
ans = ans[:-1]
n.write(file_name_full + ' ' + ans + '\n')
def word_dict():
words_dict = {'1': "1", '2': "2", '3': "3", '4': "4", '5': "5", '6': "6", '7': "7",
'8': "8",
'9': "9", 'Z': "z", 'O': "o"}
return words_dict
if __name__ == "__main__":
create_text_file()
``` |
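To make the mapping above concrete, here is a small self-contained sketch; the utterance ID is hypothetical and only mimics the `<speaker>_<digit string>` pattern the script expects in `wav.scp`.
```python
# Illustration of the digit-to-word mapping (hypothetical utterance ID).
words_dict = {'1': "1", '2': "2", '3': "3", '4': "4", '5': "5", '6': "6", '7': "7",
              '8': "8", '9': "9", 'Z': "z", 'O': "o"}
utt_id = "man_1ZO9A"                  # assumed wav.scp key: <speaker>_<digits>
digits = utt_id.split('_')[1]         # "1ZO9A"
transcript = ' '.join(words_dict[c] for c in digits if c not in ('A', 'B'))
print(utt_id, transcript)             # -> man_1ZO9A 1 z o 9
```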
{
"source": "JinchengHeRyan/STATS402_Final_MaskRcnn",
"score": 2
} |
#### File: STATS402_Final_MaskRcnn/pytorch_mask_rcnn/engine.py
```python
import time
import sys
import torch
from .utils import Meter, TextArea
try:
from .datasets import CocoEvaluator, prepare_for_coco
except:
pass
class AverageVal(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def train_one_epoch(model, optimizer, data_loader, device, epoch, args, logger=None):
rpn_objectness_losses = AverageVal()
rpn_box_losses = AverageVal()
roi_classifier_losses = AverageVal()
roi_box_losses = AverageVal()
roi_mask_losses = AverageVal()
for p in optimizer.param_groups:
p["lr"] = args.lr_epoch
iters = len(data_loader) if args.iters < 0 else args.iters
t_m = Meter("total")
m_m = Meter("model")
b_m = Meter("backward")
model.train()
A = time.time()
for i, (image, target) in enumerate(data_loader):
T = time.time()
num_iters = epoch * len(data_loader) + i
if num_iters <= args.warmup_iters:
r = num_iters / args.warmup_iters
for j, p in enumerate(optimizer.param_groups):
p["lr"] = r * args.lr_epoch
image = image.to(device)
target = {k: v.to(device) for k, v in target.items()}
S = time.time()
losses = model(image, target)
total_loss = sum(losses.values())
m_m.update(time.time() - S)
rpn_objectness_losses.update(losses["rpn_objectness_loss"].item())
rpn_box_losses.update(losses["rpn_box_loss"].item())
roi_classifier_losses.update(losses["roi_classifier_loss"].item())
roi_box_losses.update(losses["roi_box_loss"].item())
roi_mask_losses.update(losses["roi_mask_loss"].item())
S = time.time()
total_loss.backward()
optimizer.step()
optimizer.zero_grad()
b_m.update(time.time() - S)
if num_iters % args.print_freq == 0:
print(
"{}\t".format(num_iters),
"\t".join("{:.3f}".format(l.item()) for l in losses.values()),
)
t_m.update(time.time() - T)
if i >= iters - 1:
break
A = time.time() - A
print(
"iter: {:.1f}, total: {:.1f}, model: {:.1f}, backward: {:.1f}".format(
1000 * A / iters, 1000 * t_m.avg, 1000 * m_m.avg, 1000 * b_m.avg
)
)
if logger is not None:
logger.info(
"[Train] Epoch:{}\t"
"rpn_objectness_loss:{:.4f}\t"
"rpn_box_loss:{:.4f}\t"
"roi_classifier_loss:{:.4f}\t"
"roi_box_loss:{:.4f}\t"
"roi_mask_loss:{:.4f}".format(
epoch,
rpn_objectness_losses.avg,
rpn_box_losses.avg,
roi_classifier_losses.avg,
roi_box_losses.avg,
roi_mask_losses.avg,
),
)
return A / iters
def evaluate(model, data_loader, device, args, generate=True):
    iter_eval = None
    if generate:
        iter_eval = generate_results(model, data_loader, device, args)
    dataset = data_loader
iou_types = ["bbox", "segm"]
coco_evaluator = CocoEvaluator(dataset.coco, iou_types)
results = torch.load(args.results, map_location="cpu")
S = time.time()
if len(results) > 0:
coco_evaluator.accumulate(results)
print("accumulate: {:.1f}s".format(time.time() - S))
# collect outputs of buildin function print
temp = sys.stdout
sys.stdout = TextArea()
if len(results) > 0:
coco_evaluator.summarize()
output = sys.stdout
sys.stdout = temp
return output, iter_eval
# generate results file
@torch.no_grad()
def generate_results(model, data_loader, device, args):
iters = len(data_loader) if args.iters < 0 else args.iters
ann_labels = data_loader.ann_labels
t_m = Meter("total")
m_m = Meter("model")
coco_results = []
model.eval()
A = time.time()
for i, (image, target) in enumerate(data_loader):
T = time.time()
image = image.to(device)
target = {k: v.to(device) for k, v in target.items()}
S = time.time()
torch.cuda.synchronize()
output = model(image)
m_m.update(time.time() - S)
prediction = {
target["image_id"].item(): {k: v.cpu() for k, v in output.items()}
}
coco_results.extend(prepare_for_coco(prediction, ann_labels))
t_m.update(time.time() - T)
if i >= iters - 1:
break
A = time.time() - A
print(
"iter: {:.1f}, total: {:.1f}, model: {:.1f}".format(
1000 * A / iters, 1000 * t_m.avg, 1000 * m_m.avg
)
)
S = time.time()
print("all gather: {:.1f}s".format(time.time() - S))
torch.save(coco_results, args.results)
return A / iters
``` |
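A quick sketch of how the `AverageVal` meter defined above accumulates a weighted running average; the loss values and batch sizes are made up.
```python
# Running-average behaviour of AverageVal (illustrative numbers only).
meter = AverageVal()
for loss_value, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
    meter.update(loss_value, n=batch_size)
print(meter.val)               # 0.5  -> last value seen
print(round(meter.avg, 3))     # 0.74 -> (0.9*32 + 0.7*32 + 0.5*16) / 80
```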
{
"source": "JinchengKim/NumCpp",
"score": 2
} |
#### File: unitTests/testScripts/TestAll.py
```python
import TestDataCube
import TestShape
import TestSlice
import TestTimer
import TestNdArray
import TestMethods
import TestConstants
import TestCoordinates
import TestFilters
import TestImageProcessing
import TestLinalg
import TestRandom
import TestRotations
import TestPolynomial
import TestFFT
import TestUtils
import TestDtypeInfo
#################################################################################
def doTest():
TestDataCube.doTest()
TestShape.doTest()
TestSlice.doTest()
TestTimer.doTest()
TestUtils.doTest()
TestDtypeInfo.doTest()
TestNdArray.doTest()
TestMethods.doTest()
TestCoordinates.doTest()
TestConstants.doTest()
TestLinalg.doTest()
TestRandom.doTest()
TestRotations.doTest()
TestFilters.doTest()
TestPolynomial.doTest()
TestFFT.doTest()
TestImageProcessing.doTest()
#################################################################################
if __name__ == '__main__':
doTest()
``` |
{
"source": "jinchenglee/pytorch-visual-perception",
"score": 3
} |
#### File: pytorch-visual-perception/model/loss.py
```python
import torch
import torch.nn.functional as F
def nll_loss(output, target):
    # Convert the target to a dtype that avoids the runtime error:
# Expected object of type torch.cuda.LongTensor but found type torch.cuda.FloatTensor for argument #2 'target'
target_as_LongTensor = target.type(torch.cuda.LongTensor)
return F.nll_loss(output, target_as_LongTensor)
def bce_loss(output, target):
return F.binary_cross_entropy(output, target)
```
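For reference, a small sketch of the tensors each wrapper above expects; shapes and values are illustrative, and the example calls the underlying functional API directly because `nll_loss` as written hard-codes a CUDA cast.
```python
# Illustrative shapes for the two loss wrappers (CPU-friendly sketch).
import torch
import torch.nn.functional as F

logits = torch.randn(4, 3)                      # batch of 4, 3 classes
log_probs = F.log_softmax(logits, dim=1)        # nll_loss expects log-probabilities
targets = torch.tensor([0, 2, 1, 1])
print(F.nll_loss(log_probs, targets))           # what nll_loss() computes internally

probs = torch.sigmoid(torch.randn(4, 1))        # bce_loss expects probabilities in (0, 1)
labels = torch.randint(0, 2, (4, 1)).float()
print(F.binary_cross_entropy(probs, labels))    # what bce_loss() computes
```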
#### File: pytorch-visual-perception/trainer/trainer.py
```python
import numpy as np
import time
import torch
from torchvision.utils import make_grid
from base import BaseTrainer
class Trainer(BaseTrainer):
"""
Trainer class
Note:
Inherited from BaseTrainer.
"""
def __init__(self, model, loss, metrics, optimizer, resume, config,
data_loader, num_classes=3, valid_data_loader=None, lr_scheduler=None, train_logger=None):
super(Trainer, self).__init__(model, loss, metrics, optimizer, resume, config, train_logger)
self.config = config
self.batch_size = data_loader.batch_size
self.data_loader = data_loader
self.valid_data_loader = valid_data_loader
self.do_validation = self.valid_data_loader is not None
self.lr_scheduler = lr_scheduler
self.log_step = self.batch_size
self.num_classes = num_classes
def _eval_metrics(self, output, target):
acc_metrics = np.zeros((len(self.metrics), self.num_classes))
for i, metric in enumerate(self.metrics):
acc_metrics[i] += metric(output, target)
self.writer.add_scalar(metric.__name__+'_car', acc_metrics[i][0])
self.writer.add_scalar(metric.__name__+'_none', acc_metrics[i][1])
self.writer.add_scalar(metric.__name__+'_ped', acc_metrics[i][2])
self.writer.add_scalar(metric.__name__+'_rider', acc_metrics[i][3])
return acc_metrics
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current training epoch.
:return: A log that contains all information you want to save.
Note:
If you have additional information to record, for example:
> additional_log = {"x": x, "y": y}
merge it with log before return. i.e.
> log = {**log, **additional_log}
> return log
The metrics in log must have the key 'metrics'.
"""
time_start = time.time()
self.model.train()
total_loss = 0
total_metrics = np.zeros((len(self.metrics), self.num_classes))
for batch_idx, (data, target) in enumerate(self.data_loader):
data, target = data.to(self.device, non_blocking=True), target.to(self.device, non_blocking=True)
self.optimizer.zero_grad()
output = self.model(data)
loss = self.loss(output, target)
loss.backward()
self.optimizer.step()
self.writer.set_step((epoch - 1) * len(self.data_loader) + batch_idx)
self.writer.add_scalar('loss', loss.item())
# TODO: do loss calculation on GPU? And piece-wise? To reduce traffic between GPUs.
total_loss += loss.item()
# print("_train: target=", target, ", output=", output, ", total_loss=", total_loss)
total_metrics += self._eval_metrics(output, target)
if self.verbosity >= 2 and batch_idx % self.log_step == 0:
self.logger.info('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'.format(
epoch,
batch_idx * self.data_loader.batch_size,
self.data_loader.n_samples,
100.0 * batch_idx / len(self.data_loader),
loss.item()))
if batch_idx % (100 * self.log_step):
# Save only first 4 images of batch
# self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
self.writer.add_image('input', make_grid(data.cpu()[:4], nrow=1, normalize=True))
self.writer.add_image('pred', make_grid(output.cpu()[:4], nrow=1, normalize=True))
self.writer.add_image('target', make_grid(target.cpu()[:4], nrow=1, normalize=True))
log = {
'loss': total_loss / len(self.data_loader),
'metrics': (total_metrics / len(self.data_loader)).tolist()
}
if self.do_validation:
val_log = self._valid_epoch(epoch)
log = {**log, **val_log}
if self.lr_scheduler is not None:
self.lr_scheduler.step()
time_end = time.time()
print("\n_train_epoch() time spent:", time_end - time_start, "s")
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:return: A log that contains information about validation
Note:
The validation metrics in log must have the key 'val_metrics'.
"""
self.model.eval()
total_val_loss = 0
total_val_metrics = np.zeros((len(self.metrics), self.num_classes))
with torch.no_grad():
for batch_idx, (data, target) in enumerate(self.valid_data_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.loss(output, target)
self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')
self.writer.add_scalar('loss', loss.item())
total_val_loss += loss.item()
# print("_val: target=", target, ", output=", output, ", total_loss=", total_val_loss)
total_val_metrics += self._eval_metrics(output, target)
# Less freq. image dumpping for faster validation
if batch_idx % self.log_step == 0:
# Save only first 4 images of batch
# self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
self.writer.add_image('input', make_grid(data.cpu()[:4], nrow=1, normalize=True))
self.writer.add_image('pred', make_grid(output.cpu()[:4], nrow=1, normalize=True))
self.writer.add_image('target', make_grid(target.cpu()[:4], nrow=1, normalize=True))
return {
'val_loss': total_val_loss / len(self.valid_data_loader),
'val_metrics': (total_val_metrics / len(self.valid_data_loader)).tolist()
}
``` |
{
"source": "Jincheng-Sun/Kylearn",
"score": 3
} |
#### File: Kylearn/evaluation/tricks.py
```python
import numpy as np
def predict_avoid_OOM(model, data, output_dim, *args):
    """Run model.get_proba over `data` in chunks of 10000 rows to avoid out-of-memory errors."""
    proba = np.empty([0, output_dim])
    for i in range(round(data.shape[0] / 10000) + 1):
        proba = np.concatenate([proba, model.get_proba(data[i * 10000:i * 10000 + 10000], *args)])
        print(i)
    print('Done')
    return proba
``` |
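A usage sketch for the chunked-prediction helper above; `DummyModel` and the random feature array are illustrative stand-ins for whatever object exposes the `get_proba` interface the helper expects.
```python
# Usage sketch (DummyModel and the random features are illustrative stand-ins).
import numpy as np

class DummyModel:
    """Minimal object exposing the get_proba interface the helper expects."""
    def get_proba(self, batch):
        return np.full((batch.shape[0], 2), 0.5)

features = np.random.rand(25000, 10)
proba = predict_avoid_OOM(DummyModel(), features, output_dim=2)
print(proba.shape)  # (25000, 2)
```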
{
"source": "Jincheng-Sun/Kylearn-pytorch",
"score": 2
} |
#### File: Kylearn-pytorch/Models/transformer.py
```python
from framework.model import Model
from Modules.linear import LinearClassifier
from Modules.transformer import *
from Layers.transformer import *
from Layers.encodings import *
from torch.optim.adam import Adam
from torch.optim.adamw import AdamW
from Training.losses import *
from Training.evaluation import accuracy, precision_recall, Evaluator
from Training.control import TrainingControl, EarlyStopping
from tqdm import tqdm
def parse_data_enc(input_sequence, embedding):
'''
Returns:
enc_output {Tensor, [batch_size, seq_length, d_v]} --
non_pad_mask {Tensor, [n_head, seq_length, 1]} --
slf_attn_mask {Tensor, [batch_size, seq_length, seq_length]} --
'''
slf_attn_mask = get_attn_key_pad_mask(seq_k=input_sequence, seq_q=input_sequence,
padding_idx=0) # [batch_size, seq_length, seq_length]
non_pad_mask = get_non_pad_mask(input_sequence, padding_idx=0) # [batch_size, seq_length, 1]
embedding_sequence = embedding(input_sequence)
return embedding_sequence, non_pad_mask, slf_attn_mask
def parse_data_dec(input_sequence, target_sequence, embedding):
non_pad_mask = get_non_pad_mask(target_sequence)
slf_attn_mask_subseq = get_subsequent_mask(target_sequence)
slf_attn_mask_keypad = get_attn_key_pad_mask(seq_k=target_sequence, seq_q=target_sequence)
slf_attn_mask = (slf_attn_mask_keypad + slf_attn_mask_subseq).gt(0)
dec_enc_attn_mask = get_attn_key_pad_mask(seq_k=input_sequence, seq_q=target_sequence)
embedding_sequence = embedding(target_sequence)
return embedding_sequence, non_pad_mask, slf_attn_mask, dec_enc_attn_mask
class TransormerClassifierModel(Model):
def __init__(
self, save_path, log_path, d_features, d_meta, max_length, d_classifier, n_classes, threshold=None, embedding=None,
stack='Encoder', position_encode='SinusoidPositionEncoding', optimizer=None, **kwargs):
'''**kwargs: n_layers, n_head, dropout, use_bottleneck, d_bottleneck'''
super().__init__(save_path, log_path)
self.d_output = n_classes
self.threshold = threshold
self.max_length = max_length
# ----------------------------- Model ------------------------------ #
stack_dict = {
'Plain': Plain,
'Encoder': Encoder,
'Transformer': Transformer
}
encoding_dict = {
'SinusoidPositionEncoding': SinusoidPositionEncoding,
'LinearPositionEncoding': LinearPositionEncoding,
'TimeFacilityEncoding': TimeFacilityEncoding
}
self.model = stack_dict[stack](encoding_dict[position_encode], d_features=d_features, max_seq_length=max_length, d_meta=d_meta, **kwargs)
# --------------------------- Embedding --------------------------- #
if embedding is None:
self.word_embedding = None
self.USE_EMBEDDING = False
else:
self.word_embedding = nn.Embedding.from_pretrained(embedding)
self.USE_EMBEDDING = True
# --------------------------- Classifier --------------------------- #
self.classifier = LinearClassifier(d_features, max_length, d_classifier, n_classes)
# ------------------------------ CUDA ------------------------------ #
self.data_parallel()
# ---------------------------- Optimizer --------------------------- #
self.parameters = list(self.model.parameters()) + list(self.classifier.parameters())
        if optimizer is None:
self.set_optimizer(AdamW, lr=0.001, betas=(0.9, 0.999), weight_decay=0.001)
# ------------------------ training control ------------------------ #
self.controller = TrainingControl(max_step=100000, evaluate_every_nstep=100, print_every_nstep=10)
self.early_stopping = EarlyStopping(patience=50)
# --------------------- logging and tensorboard -------------------- #
self.set_logger()
self.set_summary_writer()
# ---------------------------- END INIT ---------------------------- #
def checkpoint(self, step):
checkpoint = {
'model_state_dict': self.model.state_dict(),
'classifier_state_dict': self.classifier.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'global_step': step}
return checkpoint
    def train_epoch(self, train_dataloader, eval_dataloader, device, smoothing, earlystop):
''' Epoch operation in training phase'''
if device == 'cuda':
assert self.CUDA_AVAILABLE
# Set model and classifier training mode
batch_counter = 0
# update param per batch
for batch in tqdm(
train_dataloader, mininterval=1,
desc=' - (Training) ', leave=False): # training_data should be a iterable
self.model.train()
self.classifier.train()
# get data from dataloader
index, position, y = map(lambda x: x.to(device), batch)
batch_size = len(index)
input_feature_sequence, non_pad_mask, slf_attn_mask = parse_data_enc(index, self.word_embedding)
# forward
self.optimizer.zero_grad()
logits, attn = self.model(input_feature_sequence, position, non_pad_mask, slf_attn_mask)
# logits = logits.view(batch_size, -1)
logits = self.classifier(logits)
# Judge if it's a regression problem
if self.d_output == 1:
pred = logits.sigmoid()
loss = mse_loss(pred, y)
else:
pred = logits
                loss = cross_entropy_loss(pred, y, smoothing=smoothing)
# calculate gradients
loss.backward()
# update parameters
self.optimizer.step()
# get metrics for logging
acc = accuracy(pred, y, threshold=self.threshold)
precision, recall, precision_avg, recall_avg = precision_recall(pred, y, self.d_output,
threshold=self.threshold)
batch_counter += 1
# training control
state_dict = self.controller(batch_counter)
if state_dict['step_to_print']:
self.train_logger.info(
'[TRAINING] - step: %5d, loss: %3.4f, acc: %1.4f, pre: %1.4f, rec: %1.4f' % (
state_dict['step'], loss, acc, precision[1], recall[1]))
self.summary_writer.add_scalar('loss/train', loss, state_dict['step'])
self.summary_writer.add_scalar('acc/train', acc, state_dict['step'])
self.summary_writer.add_scalar('precision/train', precision[1], state_dict['step'])
self.summary_writer.add_scalar('recall/train', recall[1], state_dict['step'])
if state_dict['step_to_evaluate']:
stop = self.val_epoch(eval_dataloader, device, state_dict['step'])
state_dict['step_to_stop'] = stop
if earlystop & stop:
break
if self.controller.current_step == self.controller.max_step:
state_dict['step_to_stop'] = True
break
return state_dict
def val_epoch(self, dataloader, device, step=0, plot=False):
''' Epoch operation in evaluation phase '''
if device == 'cuda':
assert self.CUDA_AVAILABLE
# Set model and classifier training mode
self.model.eval()
self.classifier.eval()
# use evaluator to calculate the average performance
evaluator = Evaluator()
with torch.no_grad():
for batch in tqdm(
dataloader, mininterval=5,
desc=' - (Evaluation) ', leave=False): # training_data should be a iterable
index, position, y = map(lambda x: x.to(device), batch)
batch_size = len(index)
input_feature_sequence, non_pad_mask, slf_attn_mask = parse_data_enc(index, self.word_embedding)
# get logits
logits, attn = self.model(input_feature_sequence, position, non_pad_mask, slf_attn_mask)
# logits = logits.view(batch_size, -1)
logits = self.classifier(logits)
if self.d_output == 1:
pred = logits.sigmoid()
loss = mse_loss(pred, y)
else:
pred = logits
loss = cross_entropy_loss(pred, y, smoothing=False)
acc = accuracy(pred, y, threshold=self.threshold)
precision, recall, _, _ = precision_recall(pred, y, self.d_output, threshold=self.threshold)
# feed the metrics in the evaluator
evaluator(loss.item(), acc.item(), precision[1].item(), recall[1].item())
'''append the results to the predict / real list for drawing ROC or PR curve.'''
# if plot:
# pred_list += pred.tolist()
# real_list += y.tolist()
#
# if plot:
# area, precisions, recalls, thresholds = pr(pred_list, real_list)
# plot_pr_curve(recalls, precisions, auc=area)
# get evaluation results from the evaluator
loss_avg, acc_avg, pre_avg, rec_avg = evaluator.avg_results()
self.eval_logger.info(
'[EVALUATION] - step: %5d, loss: %3.4f, acc: %1.4f, pre: %1.4f, rec: %1.4f' % (
step, loss_avg, acc_avg, pre_avg, rec_avg))
self.summary_writer.add_scalar('loss/eval', loss_avg, step)
self.summary_writer.add_scalar('acc/eval', acc_avg, step)
self.summary_writer.add_scalar('precision/eval', pre_avg, step)
self.summary_writer.add_scalar('recall/eval', rec_avg, step)
state_dict = self.early_stopping(loss_avg)
if state_dict['save']:
checkpoint = self.checkpoint(step)
self.save_model(checkpoint, self.save_path + '-step-%d_loss-%.5f'%(step,loss_avg))
return state_dict['break']
def train(self, max_epoch, train_dataloader, eval_dataloader, device,
smoothing=False, earlystop=False, save_mode='best'):
assert save_mode in ['all', 'best']
# if not (lr is None):
# self.set_optimizer(Adam, lr, betas=(0.9, 0.999), weight_decay=0)
if self.USE_EMBEDDING:
self.word_embedding = self.word_embedding.to(device)
# train for n epoch
for epoch_i in range(max_epoch):
print('[ Epoch', epoch_i, ']')
# set current epoch
self.controller.set_epoch(epoch_i + 1)
# train for on epoch
state_dict = self.train_epoch(train_dataloader, eval_dataloader, device, smoothing, earlystop)
# if state_dict['step_to_stop']:
# break
checkpoint = self.checkpoint(state_dict['step'])
self.save_model(checkpoint, self.save_path + '-step-%d' % state_dict['step'])
self.train_logger.info('[INFO]: Finish Training, ends with %d epoch(s) and %d batches, in total %d training steps.' % (
state_dict['epoch'] - 1, state_dict['batch'], state_dict['step']))
def get_predictions(self, data_loader, device, max_batches=None, activation=None):
if self.USE_EMBEDDING:
self.word_embedding = self.word_embedding.to(device)
pred_list = []
real_list = []
self.model.eval()
self.classifier.eval()
batch_counter = 0
with torch.no_grad():
for batch in tqdm(
data_loader,
desc=' - (Testing) ', leave=False):
index, position, y = map(lambda x: x.to(device), batch)
input_feature_sequence, non_pad_mask, slf_attn_mask = parse_data_enc(index, self.word_embedding)
# get logits
logits, attn = self.model(input_feature_sequence, position, non_pad_mask, slf_attn_mask)
logits = logits.view(logits.shape[0], -1)
logits = self.classifier(logits)
# Whether to apply activation function
                if activation is not None:
pred = activation(logits)
else:
pred = logits.softmax(dim=-1)
pred_list += pred.tolist()
real_list += y.tolist()
                if max_batches is not None:
batch_counter += 1
if batch_counter >= max_batches:
break
return pred_list, real_list
```
#### File: Kylearn-pytorch/Modules/linear.py
```python
import torch
import torch.nn as nn
class LinearClassifier(nn.Module):
def __init__(self, d_features, seq_length, d_hid, d_out):
super(LinearClassifier, self).__init__()
self.d_features = d_features
self.maxpool = torch.nn.MaxPool1d(seq_length, stride=1, padding=0)
self.fc1 = nn.Linear(d_features, d_hid)
self.activation = nn.functional.leaky_relu
self.fc2 = nn.Linear(d_hid, d_out)
nn.init.xavier_normal_(self.fc1.weight)
nn.init.xavier_normal_(self.fc2.weight)
def forward(self, x):
x = x.transpose(1, 2).contiguous()
x = self.maxpool(x)
x = x.view(-1, self.d_features)
x = self.fc1(x)
x = self.activation(x)
x = self.fc2(x)
return x
```
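A shape check for the classifier above; the dimensions are illustrative. The max-pool over the sequence axis reduces `[batch, seq_length, d_features]` to `[batch, d_features]` before the two linear layers.
```python
# Shape check for LinearClassifier (illustrative dimensions).
import torch

clf = LinearClassifier(d_features=64, seq_length=20, d_hid=128, d_out=2)
x = torch.randn(8, 20, 64)      # [batch, seq_length, d_features]
print(clf(x).shape)             # torch.Size([8, 2])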
#### File: Kylearn-pytorch/utils/plot_curves.py
```python
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve, auc
from sklearn.metrics import roc_auc_score, roc_curve
plt.rcParams['savefig.dpi'] = 300 # pixel
plt.rcParams['figure.dpi'] = 300 # resolution
plt.rcParams["figure.figsize"] = [5, 4] # figure size
def precision_recall(y_pred, y_test):
precisions, recalls, thresholds = precision_recall_curve(y_true=y_test, probas_pred=y_pred)
area = auc(recalls, precisions)
return area, precisions, recalls, thresholds
def plot_pr_curve(recalls, precisions, auc, x_axis = 1):
plt.rcParams['savefig.dpi'] = 300 # pixel
plt.rcParams['figure.dpi'] = 300 # resolution
plt.rcParams["figure.figsize"] = [5, 4] # figure size
plt.plot(recalls, precisions, color="darkorange", label='Precision-Recall curve (area = %0.3f)' % auc)
plt.plot([1, 0], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, x_axis])
plt.ylim([0.0, 1.05])
plt.xlabel('recall')
plt.ylabel('precision')
plt.title('Precision-Recall curve')
plt.legend(loc="lower right")
return plt
def auc_roc(y_pred, y_test):
auc = roc_auc_score(y_true=y_test, y_score=y_pred)
fprs, tprs, thresholds = roc_curve(y_true=y_test, y_score=y_pred)
return auc, fprs, tprs, thresholds
def plot_roc_curve(fprs, tprs, auc, x_axis = 1):
plt.plot(fprs, tprs, color="darkorange", label='ROC curve (area = %0.3f)' % auc)
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, x_axis])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
return plt
``` |
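A minimal end-to-end sketch for the precision-recall helpers above; the scores and labels are synthetic.
```python
# PR-curve sketch with synthetic scores and labels.
import numpy as np

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2, 0.9, 0.5])
area, precisions, recalls, thresholds = precision_recall(y_score, y_true)
plot_pr_curve(recalls, precisions, auc=area).show()
```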
{
"source": "jinchenxiangdan/ReaDF",
"score": 2
} |
#### File: jinchenxiangdan/ReaDF/ReaDF.py
```python
import sys
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMainWindow, QDesktopWidget, \
QApplication, QPushButton, QFrame, QAction, QFileDialog, QHBoxLayout, QTableWidget, QTableWidgetItem, \
QLabel, QMenu, QAbstractItemView
from PyQt5.QtGui import QIcon, QPixmap, QImage
from PyQt5.QtCore import QSize, Qt
from main_window import MainWindow
import fitz
def init_tool_bar(main_window):
"""
    Initialize the tool bar of a main window and add the open-file, close, and settings actions.
:param main_window: a QMainWindow object
:return:
"""
# set up a empty tool bar
tool_bar = QtWidgets.QToolBar(main_window)
tool_bar.setMovable(True)
tool_bar.setObjectName("ToolBar")
main_window.addToolBar(QtCore.Qt.TopToolBarArea, tool_bar)
open_file_button = QtWidgets.QAction(main_window)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("img/iconfinder_plus.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
open_file_button.setIcon(icon)
open_file_button.setObjectName("OpenFile")
set_bar = QtWidgets.QAction(main_window)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("img/iconfinder_Tile.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
set_bar.setIcon(icon1)
set_bar.setObjectName("setbar")
action = QtWidgets.QAction(main_window)
action.setCheckable(True)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("img/iconfinder_close.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
action.setIcon(icon2)
action.setObjectName("action")
tool_bar.addAction(open_file_button)
tool_bar.addAction(action)
tool_bar.addAction(set_bar)
class ReaDF(QMainWindow, MainWindow):
_TITLE = "ReaDF"
_FILE_PATH = "pdf_examples/"
def __init__(self, parent=None):
"""
        Initialize the ReaDF window: UI setup, tool bar, window icon, and title.
:param parent:
"""
super(ReaDF, self).__init__(parent)
self.setup_ui(self)
init_tool_bar(self)
##################################################################
# test
##################################################################
print(fitz.__doc__)
example_pdf_file_path = "pdf_examples/declaration_of_support.pdf"
example_pdf_file = fitz.open(example_pdf_file_path)
print(example_pdf_file.metadata)
print(example_pdf_file.pageCount)
print(example_pdf_file.getToC())
pix_map = example_pdf_file[0].getPixmap()
##################################################################
# self.y = 0
# self.x = 0
self.setWindowIcon(QIcon('img/book.png'))
self.setWindowTitle(self._TITLE)
# self.screen = QDesktopWidget().screenGeometry()
# self.setup_ui(self)
# self.setWindowFlags(Qt.WindowMinimizeButtonHint | Qt.WindowCloseButtonHint)
# self.setContextMenuPolicy(Qt.NoContextMenu)
# # self.setFixedSize(self.screen.width(), self.screen.height())
# self.table = QTableWidget()
# self.setCentralWidget(self.table)
# self.init_ui()
# self.book_list = []
def filter_book(self, fname):
if not fname:
return False
if fname not in self.book_list:
self.book_list.append(fname)
return True
return False
def read_files(self):
fname, _ = QFileDialog.getOpenFileName(self, "Open files", './', '(*.pdf)')
return fname
def open(self):
fname = self.read_files()
if self.filter_book(fname):
self.setIcon(fname)
def set_table_style(self):
        # Enable horizontal and vertical scroll bars
self.table.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.table.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        # Set up a table of 5 rows by 8 columns
self.table.setColumnCount(8)
self.table.setRowCount(5)
        # Set the standard column width
self.width = self.screen.width() // 8
        # Set the cell widths
for i in range(8):
self.table.setColumnWidth(i, self.width)
        # Set the cell heights so the height-to-width ratio is 4:3
for i in range(5):
self.table.setRowHeight(i, self.width * 4 // 3)
        # Hide the header bars
self.table.verticalHeader().setVisible(False)
self.table.horizontalHeader().setVisible(False)
        # Disable editing
self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)
        # Hide grid lines
self.table.setShowGrid(False)
        # Bind a right-click context menu to the cells;
        # a context-menu request on a cell invokes self.generate_menu
self.table.setContextMenuPolicy(Qt.CustomContextMenu)
self.table.customContextMenuRequested.connect(self.generate_menu)
def read_pdf(self, file_path):
pdf_file = fitz.open(file_path)
# TODO: add custom setting on naming files
        title = file_path.replace('\\', '/').split('/')[-1].replace('.pdf', '')
def setIcon(self, file_name):
pdf_file = fitz.open(file_name)
cover = render_pdf_page(pdf_file, True)
label = QLabel(self)
label.setScaledContents(True)
label.setPixmap(QPixmap(cover))
self.table.setCellWidget(self.x, self.y, label)
del label
self.crow, self.ccol = self.x, self.y
if (not self.y % 7) and (self.y):
self.x += 1
self.y = 0
else:
self.y += 1
def generate_menu(self, pos):
row_num = col_num = -1
        # Get the row and column indices of the selected cell
for i in self.table.selectionModel().selection().indexes():
row_num = i.row()
col_num = i.column()
        # If the selected cell holds a book, offer the context menu
if (row_num < self.crow) or (row_num == self.crow and col_num <= self.ccol):
menu = QMenu()
            item1 = menu.addAction('Start Reading')
            item2 = menu.addAction('Delete Book')
            # Get the chosen menu action
action = menu.exec_(self.table.mapToGlobal(pos))
if action == item1:
index = row_num * 8 + col_num
fname = self.book_list[index]
if fname not in self.read_list and len(self.read_list) < 2:
self.read_list.append(fname)
self.read_book(fname)
elif action == item2:
self.delete_book(row_num, col_num)
def render_pdf_page(pdf_page, for_cover=False):
zoom_matrix = fitz.Matrix(4, 4)
if for_cover:
zoom_matrix = fitz.Matrix(1, 1)
page_pixmap = pdf_page.getPixmap(
matrix=zoom_matrix,
alpha=False
)
image_format = QtGui.QImage.Format_RGB888
page_qimg = QtGui.QImage(
page_pixmap.samples,
page_pixmap.width,
page_pixmap.height,
page_pixmap.stride,
image_format)
pixmap = QtGui.QPixmap()
pixmap.convertFromImage(page_qimg)
return pixmap
``` |
{
"source": "jinchi111/androguard",
"score": 2
} |
#### File: core/bytecodes/dvm_types.py
```python
from enum import IntEnum
from collections import OrderedDict
# This file contains dictionaries used in the Dalvik Format.
# Used to identify different types of operands
KIND_METH = 0
KIND_STRING = 1
KIND_FIELD = 2
KIND_TYPE = 3
VARIES = 4
INLINE_METHOD = 5
VTABLE_OFFSET = 6
FIELD_OFFSET = 7
KIND_RAW_STRING = 8
OPERAND_REGISTER = 0
OPERAND_LITERAL = 1
OPERAND_RAW = 2
OPERAND_OFFSET = 3
OPERAND_KIND = 0x100
# https://source.android.com/devices/tech/dalvik/dex-format#type-codes
class TypeMapItem(IntEnum):
HEADER_ITEM = 0x0
STRING_ID_ITEM = 0x1
TYPE_ID_ITEM = 0x2
PROTO_ID_ITEM = 0x3
FIELD_ID_ITEM = 0x4
METHOD_ID_ITEM = 0x5
CLASS_DEF_ITEM = 0x6
MAP_LIST = 0x1000
TYPE_LIST = 0x1001
ANNOTATION_SET_REF_LIST = 0x1002
ANNOTATION_SET_ITEM = 0x1003
CLASS_DATA_ITEM = 0x2000
CODE_ITEM = 0x2001
STRING_DATA_ITEM = 0x2002
DEBUG_INFO_ITEM = 0x2003
ANNOTATION_ITEM = 0x2004
ENCODED_ARRAY_ITEM = 0x2005
ANNOTATIONS_DIRECTORY_ITEM = 0x2006
@staticmethod
def _get_dependencies():
return OrderedDict([
(TypeMapItem.HEADER_ITEM, set()),
(TypeMapItem.STRING_ID_ITEM, set([TypeMapItem.STRING_DATA_ITEM])),
(TypeMapItem.TYPE_ID_ITEM, set([TypeMapItem.STRING_ID_ITEM])),
(TypeMapItem.PROTO_ID_ITEM, set([TypeMapItem.STRING_ID_ITEM, TypeMapItem.TYPE_ID_ITEM, TypeMapItem.TYPE_LIST])),
(TypeMapItem.FIELD_ID_ITEM, set([TypeMapItem.STRING_ID_ITEM, TypeMapItem.TYPE_ID_ITEM])),
(TypeMapItem.METHOD_ID_ITEM, set([TypeMapItem.STRING_ID_ITEM, TypeMapItem.TYPE_ID_ITEM, TypeMapItem.PROTO_ID_ITEM])),
(TypeMapItem.CLASS_DEF_ITEM, set([TypeMapItem.TYPE_ID_ITEM, TypeMapItem.TYPE_LIST, TypeMapItem.STRING_ID_ITEM, TypeMapItem.DEBUG_INFO_ITEM, TypeMapItem.ANNOTATIONS_DIRECTORY_ITEM, TypeMapItem.CLASS_DATA_ITEM, TypeMapItem.ENCODED_ARRAY_ITEM])),
(TypeMapItem.MAP_LIST, set()),
(TypeMapItem.TYPE_LIST, set([TypeMapItem.TYPE_ID_ITEM])),
(TypeMapItem.ANNOTATION_SET_REF_LIST, set([TypeMapItem.ANNOTATION_SET_ITEM])),
(TypeMapItem.ANNOTATION_SET_ITEM, set([TypeMapItem.ANNOTATION_ITEM])),
(TypeMapItem.CLASS_DATA_ITEM, set([TypeMapItem.FIELD_ID_ITEM, TypeMapItem.METHOD_ID_ITEM])),
(TypeMapItem.CODE_ITEM, set([TypeMapItem.DEBUG_INFO_ITEM, TypeMapItem.TYPE_ID_ITEM])),
(TypeMapItem.STRING_DATA_ITEM, set()),
(TypeMapItem.DEBUG_INFO_ITEM, set([TypeMapItem.STRING_ID_ITEM, TypeMapItem.TYPE_ID_ITEM])),
(TypeMapItem.ANNOTATION_ITEM, set([TypeMapItem.PROTO_ID_ITEM, TypeMapItem.STRING_ID_ITEM, TypeMapItem.TYPE_ID_ITEM, TypeMapItem.FIELD_ID_ITEM, TypeMapItem.METHOD_ID_ITEM])),
(TypeMapItem.ENCODED_ARRAY_ITEM, set([TypeMapItem.PROTO_ID_ITEM, TypeMapItem.STRING_ID_ITEM, TypeMapItem.TYPE_ID_ITEM, TypeMapItem.FIELD_ID_ITEM, TypeMapItem.METHOD_ID_ITEM])),
(TypeMapItem.ANNOTATIONS_DIRECTORY_ITEM, set([TypeMapItem.FIELD_ID_ITEM, TypeMapItem.METHOD_ID_ITEM, TypeMapItem.ANNOTATION_SET_ITEM]))
])
@staticmethod
def determine_load_order():
dependencies = TypeMapItem._get_dependencies()
ordered = dict()
while dependencies:
found_next = False
for type_name, unloaded in dependencies.items():
if not unloaded:
ordered[type_name] = len(ordered)
found_next = True
break
if found_next is False:
raise Exception('recursive loading dependency')
dependencies.pop(type_name)
for unloaded in dependencies.values():
unloaded.discard(type_name)
return ordered
# https://source.android.com/devices/tech/dalvik/dex-format#access-flags
ACCESS_FLAGS = {
0x1: 'public',
0x2: 'private',
0x4: 'protected',
0x8: 'static',
0x10: 'final',
0x20: 'synchronized',
0x40: 'bridge',
0x80: 'varargs',
0x100: 'native',
0x200: 'interface',
0x400: 'abstract',
0x800: 'strictfp',
0x1000: 'synthetic',
0x4000: 'enum',
0x8000: 'unused',
0x10000: 'constructor',
0x20000: 'synchronized',
}
# https://source.android.com/devices/tech/dalvik/dex-format#typedescriptor
TYPE_DESCRIPTOR = {
'V': 'void',
'Z': 'boolean',
'B': 'byte',
'S': 'short',
'C': 'char',
'I': 'int',
'J': 'long',
'F': 'float',
'D': 'double',
}
```
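The load order computed above is effectively a topological sort of the dependency table; a quick sanity check, assuming `TypeMapItem` is imported from this module:
```python
# Sanity check: dependencies are always assigned a smaller load index.
order = TypeMapItem.determine_load_order()
assert order[TypeMapItem.STRING_DATA_ITEM] < order[TypeMapItem.STRING_ID_ITEM]
assert order[TypeMapItem.TYPE_ID_ITEM] < order[TypeMapItem.CLASS_DEF_ITEM]
print([t.name for t in sorted(order, key=order.get)][:4])
# e.g. ['HEADER_ITEM', 'MAP_LIST', 'STRING_DATA_ITEM', 'STRING_ID_ITEM']
```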
#### File: androguard/gui/DisasmViewMode.py
```python
from PyQt5 import QtGui, QtCore
from androguard.core.bytecodes import dvm_types
from androguard.gui import TextSelection
from androguard.gui.ViewMode import ViewMode
from androguard.gui.cemu import ConsoleEmulator, Directions
import logging
log = logging.getLogger("androguard.gui")
MNEMONIC_COLUMN = 30
MNEMONIC_WIDTH = 30
class InstructionView:
def __init__(self, ins):
self.ins = ins
self._indexTable = []
self._Operands = []
self._Comments = []
self.loaded = False
def AddComment(self, cmt):
self._Comments.append(cmt)
def Load(self):
if self.loaded:
return
H = self.get_hex().split(' ')
for i, h in enumerate(H):
self._indexTable += [(i * 3, len(h), h)]
self._indexTable += [(MNEMONIC_COLUMN, len(self.get_name()), self.get_name())]
i = 0
offset = 0
for operand in self.ins.get_operands(0):
value = None
if operand[0] == dvm_types.OPERAND_REGISTER:
value = [operand[0], "v%d" % operand[1]]
elif operand[0] == dvm_types.OPERAND_LITERAL:
value = [operand[0], "%d" % operand[1]]
elif operand[0] == dvm_types.OPERAND_RAW:
value = [operand[0], "%s" % operand[1]]
elif operand[0] == dvm_types.OPERAND_OFFSET:
value = [operand[0], "%d" % operand[1]]
elif operand[0] & dvm_types.OPERAND_KIND:
if operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_STRING):
value = [operand[0], "%s" % operand[2]]
elif operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_METH):
value = [operand[0], "%s" % operand[2]]
elif operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_FIELD):
value = [operand[0], "%s" % operand[2]]
elif operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_TYPE):
value = [operand[0], "%s" % operand[2]]
if value:
if offset > 0:
offset += 1
t = (offset + MNEMONIC_COLUMN + MNEMONIC_WIDTH, len(value[1]), value[1])
self._indexTable += [t]
self._Operands.append(value)
offset += len(value[1])
self.loaded = True
def get_hex(self):
return self.ins.get_hex()
def get_length(self):
return self.ins.get_length() * 2
def get_name(self):
return self.ins.get_name()
def get_operands(self, idx=-1):
return self._Operands
def get_symbol(self):
return None
def get_output(self):
return self.ins.get_output()
def tokens(self):
return self._indexTable
def get_comments(self):
return self._Comments
@property
def indexTable(self):
return self._indexTable
def getSelectionWidth(self, cx):
for i, t in enumerate(self.indexTable):
idx, length, value = t
if cx == idx:
return length
return 0
def getEndCursor(self):
idx, length, value = self.indexTable[-1]
return idx
def getNearestCursor(self, cx):
if cx > self.getEndCursor():
return self.getEndCursor()
i = len(self.indexTable) - 1
while i > 0:
idx, length, value = self.indexTable[i]
if cx >= idx:
return idx
i -= 1
return 0
def getNextCursor(self, cx, direction=''):
for i, t in enumerate(self.indexTable):
idx, length, value = t
if cx == idx:
break
if direction == Directions.Right:
if i < len(self.indexTable) - 1:
idx, length, value = self.indexTable[i + 1]
else:
return 0, 1
if direction == Directions.Left:
if i > 0:
idx, length, value = self.indexTable[i - 1]
else:
return 0, -1
return idx, 0
def getSelectedToken(self, cx):
for i, t in enumerate(self._indexTable):
idx, length, value = t
if cx == idx:
return t
return None, None, None
class DisasmViewMode(ViewMode):
def __init__(self, themes, width, height, data, cursor, widget=None):
super().__init__()
self.themes = themes
self.dataModel = data
self.addHandler(self.dataModel)
self.width = width
self.height = height
self.cursor = cursor
self.widget = widget
self.refresh = True
# background brush
self.backgroundBrush = QtGui.QBrush(self.themes['background'])
# text font
self.font = themes['font']
# font metrics. assume font is monospaced
self.font.setKerning(False)
self.font.setFixedPitch(True)
fm = QtGui.QFontMetrics(self.font)
self._fontWidth = fm.width('a')
self._fontHeight = fm.height()
self.FlowHistory = []
self.CACHE_OPCODES = []
self.CACHE_IDX_OPCODES = {}
self.CACHE_IDX_OPCODES_OFF = {}
self.OPCODES = []
vm_analysis = self.dataModel.dx
methods = [i for i in self.dataModel.current_class.get_methods()]
log.debug(methods)
methods = sorted(methods, key=lambda x: x.get_address(), reverse=True)
offset = 0
cnt = 0
for method in methods:
mx = vm_analysis.get_method(method)
for DVMBasicMethodBlockInstruction in method.get_instructions():
# for DVMBasicMethodBlock in mx.basic_blocks.gets():
# for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions():
ins = InstructionView(DVMBasicMethodBlockInstruction)
self.CACHE_OPCODES.append(ins)
self.CACHE_IDX_OPCODES[offset] = ins
self.CACHE_IDX_OPCODES_OFF[offset] = cnt
offset += ins.get_length()
cnt += 1
self.max_offset = offset
log.debug(sorted(self.CACHE_IDX_OPCODES_OFF.keys()))
self.textPen = QtGui.QPen(self.themes['pen'], 0, QtCore.Qt.SolidLine)
self.resize(width, height)
self.Paints = {}
self.Ops = []
self.newPix = None
self.selector = TextSelection.DisasmSelection(themes, self)
def GetLengthOpcodes(self):
length = 0
for i in self.CACHE_OPCODES:
length += i.get_length()
return length
def FeedOpcodes(self, cnt):
log.debug('FEED %s', cnt)
self.OPCODES = []
for i in range(0, min(cnt, len(self.CACHE_OPCODES))):
ins = self.CACHE_OPCODES[i]
ins.Load()
self.OPCODES.append(ins)
@property
def fontWidth(self):
return self._fontWidth
@property
def fontHeight(self):
return self._fontHeight
def setTransformationEngine(self, engine):
self.transformationEngine = engine
def resize(self, width, height):
self.width = width - width % self.fontWidth
self.height = height - height % self.fontHeight
self.computeTextArea()
self.qpix = self._getNewPixmap(self.width, self.height + self.SPACER)
self.refresh = True
self.FeedOpcodes(self.ROWS)
def computeTextArea(self):
self.COLUMNS = self.width // self.fontWidth
self.ROWS = self.height // self.fontHeight
self.notify(self.ROWS, self.COLUMNS)
def getPixmap(self):
for t in self.Ops:
if len(t) == 1:
t[0]()
else:
t[0](*t[1:])
self.Ops = []
if not self.newPix:
self.draw()
return self.newPix
def getPageOffset(self):
return self.dataModel.getOffset()
def getGeometry(self):
return self.COLUMNS, self.ROWS
def getDataModel(self):
return self.dataModel
def startSelection(self):
self.selector.startSelection()
def stopSelection(self):
self.selector.stopSelection()
def getCursorOffsetInPage(self):
x, y = self.cursor.getPosition()
preY = sum([asm.get_length() for asm in self.OPCODES[:y]])
if len(self.OPCODES) - 1 < y:
return 0
asm = self.OPCODES[y]
if x < len(asm.get_hex()):
postY = x // 3
else:
postY = asm.get_length()
return preY + postY
def getCursorAbsolutePosition(self):
offset = self.getCursorOffsetInPage()
return self.dataModel.getOffset() + offset
def drawCursor(self, qp):
cursorX, cursorY = self.cursor.getPosition()
log.debug("%s / %s", cursorX, cursorY)
xstart = cursorX
        if len(self.OPCODES) - 1 < cursorY:
log.warning("Impossible to find instruction at cursor %d, %d" % (cursorY, len(self.OPCODES)))
return
asm = self.OPCODES[cursorY]
width = asm.getSelectionWidth(xstart)
qp.setBrush(QtGui.QColor(255, 255, 0))
qp.setOpacity(0.5)
qp.drawRect(xstart * self.fontWidth,
cursorY * self.fontHeight,
width * self.fontWidth,
self.fontHeight + 2)
qp.setOpacity(1)
def drawSelected(self, qp):
qp.setFont(self.font)
cursorX, cursorY = self.cursor.getPosition()
if len(self.OPCODES) - 1 < cursorY:
return
asm = self.OPCODES[cursorY]
_, width, text = asm.getSelectedToken(cursorX)
for i, asm in enumerate(self.OPCODES):
for idx, length, value in asm.tokens():
# skip current cursor position
if cursorY == i and cursorX == idx:
continue
# check every line, if match, select it
if value == text:
qp.setOpacity(0.4)
brush = QtGui.QBrush(QtGui.QColor(0, 255, 0))
qp.fillRect(idx * self.fontWidth,
i * self.fontHeight + 2,
width * self.fontWidth,
self.fontHeight,
brush)
qp.setOpacity(1)
def drawBranch(self, qp):
qp.fillRect(-50, 0, 50, self.ROWS * self.fontHeight, self.backgroundBrush)
def drawBranch2(self, qp):
cursorX, cursorY = self.cursor.getPosition()
if len(self.OPCODES) - 1 < cursorY:
return
asm = self.OPCODES[cursorY]
if asm.isBranch():
tsize = sum([o.size for o in self.OPCODES])
msize = sum([o.size for o in self.OPCODES[:cursorY]])
half = self.fontHeight // 2
# branch address
target = asm.branchAddress()
if target is None:
return
screenVA = self._getVA(self.dataModel.getOffset())
if screenVA < target < self._getVA(self.dataModel.getOffset()) + tsize - self.OPCODES[-1].size:
# branch target is in screen
sz = 0
for i, t in enumerate(self.OPCODES):
sz += t.size
if sz + self._getVA(self.dataModel.getOffset()) >= target:
break
qp.setPen(QtGui.QPen(QtGui.QColor(0, 192, 0), 1, QtCore.Qt.SolidLine))
# draw the three lines
qp.drawLine(-5, cursorY * self.fontHeight + self.fontHeight // 2, -30, cursorY * self.fontHeight + half)
qp.drawLine(-30, cursorY * self.fontHeight + self.fontHeight // 2, -30,
(i + 1) * self.fontHeight + half)
qp.drawLine(-30, (i + 1) * self.fontHeight + half, -15, (i + 1) * self.fontHeight + half)
# draw arrow
points = [QtCore.QPoint(-15, (i + 1) * self.fontHeight + half - 5),
QtCore.QPoint(-15, (i + 1) * self.fontHeight + half + 5),
QtCore.QPoint(-5, (i + 1) * self.fontHeight + half), ]
needle = QtGui.QPolygon(points)
qp.setBrush(QtGui.QBrush(QtGui.QColor(0, 128, 0)))
qp.drawPolygon(needle)
elif target > screenVA:
# branch is at greater address, out of screen
qp.setPen(QtGui.QPen(QtGui.QColor(0, 192, 0), 1, QtCore.Qt.DotLine))
# draw the two lines
qp.drawLine(-5, cursorY * self.fontHeight + self.fontHeight // 2, -30, cursorY * self.fontHeight + half)
qp.drawLine(-30, cursorY * self.fontHeight + self.fontHeight // 2, -30,
(self.ROWS - 2) * self.fontHeight + half)
# draw arrow
points = [QtCore.QPoint(-25, (self.ROWS - 2) * self.fontHeight + half),
QtCore.QPoint(-35, (self.ROWS - 2) * self.fontHeight + half),
QtCore.QPoint(-30, (self.ROWS - 2) * self.fontHeight + 2 * half), ]
needle = QtGui.QPolygon(points)
qp.setBrush(QtGui.QBrush(QtGui.QColor(0, 128, 0)))
qp.drawPolygon(needle)
else:
# upper arrow
# branch is at lower address, out of screen
qp.setPen(QtGui.QPen(QtGui.QColor(0, 192, 0), 1, QtCore.Qt.DotLine))
# draw the two lines
qp.drawLine(-5, cursorY * self.fontHeight + self.fontHeight // 2, -30, cursorY * self.fontHeight + half)
qp.drawLine(-30, cursorY * self.fontHeight + self.fontHeight // 2, -30, 1 * self.fontHeight + half)
# draw arrow
points = [QtCore.QPoint(-25, 1 * self.fontHeight + half),
QtCore.QPoint(-35, 1 * self.fontHeight + half),
QtCore.QPoint(-30, 1 * self.fontHeight), ]
needle = QtGui.QPolygon(points)
qp.setBrush(QtGui.QBrush(QtGui.QColor(0, 128, 0)))
qp.drawPolygon(needle)
def draw(self, refresh=False):
if self.dataModel.getOffset() in self.Paints:
self.refresh = False
self.qpix = QtGui.QPixmap(self.Paints[self.dataModel.getOffset()])
self.drawAdditionals()
return
if self.refresh or refresh:
qp = QtGui.QPainter()
qp.begin(self.qpix)
self.drawTextMode(qp)
self.refresh = False
qp.end()
# self.Paints[self.dataModel.getOffset()] = QtGui.QPixmap(self.qpix)
self.drawAdditionals()
def drawAdditionals(self):
self.newPix = self._getNewPixmap(self.width, self.height + self.SPACER)
qp = QtGui.QPainter()
qp.begin(self.newPix)
qp.setWindow(-50, 0, self.COLUMNS * self.fontWidth, self.ROWS * self.fontHeight)
qp.drawPixmap(0, 0, self.qpix)
# self.transformationEngine.decorateText()
# highlight selected text
self.selector.highlightText()
# draw other selections
self.selector.drawSelections(qp)
# draw our cursor
self.drawCursor(qp)
self.drawBranch(qp)
self.drawSelected(qp)
qp.end()
def _getNewPixmap(self, width, height):
return QtGui.QPixmap(width, height)
def getColumnsbyRow(self, row):
if row < len(self.OPCODES):
obj = self.OPCODES[row]
return obj.get_length()
else:
return 0
def _getVA(self, offset):
if self.plugin:
return self.plugin.hintDisasmVA(offset)
return 0
def _drawRow(self, qp, cemu, row, asm, offset=-1):
log.debug('DRAW AN INSTRUCTION %s %s %s %s %s', asm, row, asm.get_name(), len(asm.get_operands(offset)), hex(self.getPageOffset()))
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
hex_data = asm.get_hex()
# write hexdump
cemu.writeAt(0, row, hex_data)
# fill with spaces
cemu.write((MNEMONIC_COLUMN - len(hex_data)) * ' ')
# let's color some branch instr
# if asm.isBranch():
# qp.setPen(QtGui.QPen(QtGui.QColor(255, 80, 0)))
# else:
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
mnemonic = asm.get_name()
cemu.write(mnemonic)
# leave some spaces
cemu.write((MNEMONIC_WIDTH - len(mnemonic)) * ' ')
if asm.get_symbol():
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
cemu.write_c('[')
qp.setPen(QtGui.QPen(QtGui.QColor('yellow'), 1, QtCore.Qt.SolidLine))
cemu.write(asm.get_symbol())
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
cemu.write_c(']')
self._write_operands(asm, qp, cemu, offset)
self._write_comments(asm, qp, cemu, offset)
def _write_comments(self, asm, qp, cemu, offset):
comments = asm.get_comments()
if comments:
cemu.write(30 * ' ')
qp.setPen(QtGui.QPen(QtGui.QColor(82, 192, 192), 1, QtCore.Qt.SolidLine))
cemu.write('; "{}"'.format(' '.join(comments)))
def _write_operands(self, asm, qp, cemu, offset):
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
operands = asm.get_operands(offset)
for operand in operands:
qp.save()
if operand[0] == dvm_types.OPERAND_REGISTER:
qp.setPen(QtGui.QPen(QtGui.QColor('white')))
cemu.write("%s" % operand[1])
elif operand[0] == dvm_types.OPERAND_LITERAL:
qp.setPen(QtGui.QPen(QtGui.QColor('yellow')))
cemu.write("%s" % operand[1])
elif operand[0] == dvm_types.OPERAND_RAW:
qp.setPen(QtGui.QPen(QtGui.QColor('red')))
cemu.write("%s" % operand[1])
elif operand[0] == dvm_types.OPERAND_OFFSET:
qp.setPen(QtGui.QPen(QtGui.QColor('purple')))
cemu.write("%s" % operand[1])
elif operand[0] & dvm_types.OPERAND_KIND:
if operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_STRING):
qp.setPen(QtGui.QPen(QtGui.QColor('red')))
cemu.write("%s" % operand[1])
elif operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_METH):
qp.setPen(QtGui.QPen(QtGui.QColor('cyan')))
cemu.write("%s" % operand[1])
elif operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_FIELD):
qp.setPen(QtGui.QPen(QtGui.QColor('green')))
cemu.write("%s" % operand[1])
elif operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_TYPE):
qp.setPen(QtGui.QPen(QtGui.QColor('blue')))
cemu.write("%s" % operand[1])
cemu.write(" ")
qp.restore()
def _write_instruction2(self, asm, qp, cemu):
s = asm.operands
idx = 0
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
for tok in asm.lexer:
if tok.lexpos > idx:
cemu.write(s[idx:tok.lexpos])
idx = tok.lexpos
qp.save()
if tok.type == 'REGISTER':
qp.setPen(QtGui.QPen(QtGui.QColor('white')))
if tok.type == 'NUMBER':
qp.setPen(QtGui.QPen(QtGui.QColor('green')))
cemu.write(tok.value)
qp.restore()
idx = tok.lexpos + len(tok.value)
if idx < len(s):
cemu.write(s[idx:])
def drawTextMode(self, qp):
log.debug('OFFSET %s', self.dataModel.getOffset())
# draw background
qp.fillRect(0, 0, self.COLUMNS * self.fontWidth, self.ROWS * self.fontHeight, self.backgroundBrush)
# set text pen&font
qp.setFont(self.font)
qp.setPen(self.textPen)
cemu = ConsoleEmulator(qp, self.ROWS, self.COLUMNS)
offset = 0
for i in range(self.ROWS):
if i < len(self.OPCODES):
asm = self.OPCODES[i]
self._drawRow(qp, cemu, i, asm, offset)
offset += asm.get_length()
def _getRowInPage(self, offset):
offset -= self.dataModel.getOffset()
size = 0
for i, asm in enumerate(self.OPCODES):
if size + asm.get_length() > offset:
return i
size += asm.get_length()
return None
def _getOffsetOfRow(self, row):
# of course, it could be done nicely, not like this
size = 0
for i, asm in enumerate(self.OPCODES):
if i == row:
return size
size += asm.get_length()
return None
def goTo(self, offset):
log.debug("GOTO %s", offset)
tsize = sum([opcode.get_length() for opcode in self.OPCODES])
if self.dataModel.getOffset() + tsize > offset > self.dataModel.getOffset():
# if in current page, move cursor
            row = self._getRowInPage(offset)
            if row is not None:
                off_row = self._getOffsetOfRow(row)
                diff = offset - self.dataModel.getOffset() - off_row  # self.OPCODES[row].size
                self.cursor.moveAbsolute(diff * 3, row)
self.draw(refresh=False)
else:
# else, move page
self.dataModel.goTo(offset)
self.FeedOpcodes(self.ROWS)
self.cursor.moveAbsolute(0, 0)
self.draw(refresh=True)
# TODO: getDisplayablePage() won't contain what we want to disasm. we will use dataModel
# in this view, getDisplayablePage will contain disasm text, because that is what is displayed
if self.widget:
self.widget.update()
def scrollPages(self, number, cachePix=None, pageOffset=None):
self.scroll(0, -number * self.ROWS, cachePix=cachePix, pageOffset=pageOffset)
def scroll_v(self, dy, cachePix=None, pageOffset=None):
log.debug('scroll_v %s %s %s %s', dy, cachePix, pageOffset, hex(self.getCursorAbsolutePosition()))
RowsToDraw = []
factor = abs(dy)
# repeat as many rows we have scrolled
for row in range(factor):
current_idx = None
if dy < 0:
tsize = sum([asm.get_length() for asm in self.OPCODES])
current_offset = self.dataModel.getOffset() + tsize
if current_offset not in self.CACHE_IDX_OPCODES_OFF:
log.debug('INVALID OFFSET %s', hex(current_offset))
return
current_idx = self.CACHE_IDX_OPCODES_OFF[current_offset] - 1
log.debug("IDX %s %s", current_idx, hex(current_offset))
if current_idx + 1 >= len(self.CACHE_OPCODES):
log.debug('END OF DATA')
return
current_idx += 1
if dy >= 0:
current_offset = self.dataModel.getOffset()
current_idx = self.CACHE_IDX_OPCODES_OFF[current_offset]
log.debug("IDX %s %s", current_idx, hex(current_offset))
# start = self.CACHE_OPCODES[self.CACHE_IDX_OPCODES_OFF[self.getCursorAbsolutePosition()]-1]
current_idx -= 1
newins = self.CACHE_OPCODES[current_idx]
if dy < 0:
self.dataModel.slide(self.OPCODES[0].get_length())
del self.OPCODES[0]
if dy >= 0:
self.dataModel.slide(-newins.get_length())
del self.OPCODES[len(self.OPCODES) - 1]
if dy < 0:
self.OPCODES.append(newins)
if dy > 0:
self.OPCODES.insert(0, newins)
if dy < 0:
RowsToDraw.append((self.ROWS + row, newins))
if dy > 0:
RowsToDraw.append((-row - 1, newins))
log.debug('ROW TO DRAW %s', RowsToDraw)
if len(RowsToDraw) < abs(dy):
            # maybe we couldn't draw dy rows (possibly we reached the beginning of the data too early), recalculate dy
dy = len(RowsToDraw) * dy / abs(dy)
factor = abs(dy)
if not cachePix:
self.qpix.scroll(0, dy * self.fontHeight, self.qpix.rect())
qp = QtGui.QPainter()
if cachePix:
qp.begin(cachePix)
else:
qp.begin(self.qpix)
qp.setFont(self.font)
qp.setPen(self.textPen)
# erase rows that will disappear
if dy < 0:
qp.fillRect(0, (self.ROWS - factor) * self.fontHeight, self.fontWidth * self.COLUMNS,
factor * self.fontHeight, self.backgroundBrush)
if dy > 0:
qp.fillRect(0, 0, self.fontWidth * self.COLUMNS, factor * self.fontHeight, self.backgroundBrush)
cemu = ConsoleEmulator(qp, self.ROWS, self.COLUMNS)
for row, asm in RowsToDraw:
asm.Load()
self._drawRow(qp, cemu, dy + row, asm)
qp.end()
def scroll(self, dx, dy, cachePix=None, pageOffset=None):
log.debug('scroll %s %s %s %s %s', dx, dy, self.dataModel.inLimits((self.dataModel.getOffset() - dx)), 'offset',
self.dataModel.getOffset())
if dx != 0:
if self.dataModel.inLimits((self.dataModel.getOffset() - dx)):
self.dataModel.slide(dx)
self.draw(refresh=True)
# self.scroll_h(dx)
if dy != 0:
if dy > 0:
if self.dataModel.getOffset() == 0:
log.debug('OFFSET == 0')
return
if dy < 0:
tsize = sum([asm.get_length() for asm in self.OPCODES])
if self.dataModel.getOffset() + tsize == self.dataModel.getDataSize():
log.debug('END')
return
self.scroll_v(dy, cachePix, pageOffset)
def moveCursor(self, direction):
cursorX, cursorY = self.cursor.getPosition()
if direction == Directions.Left:
asm = self.OPCODES[cursorY]
if cursorX == 0:
if cursorY == 0:
# if first line, scroll
self.scroll(0, 1)
self.cursor.moveAbsolute(0, 0)
else:
# move to last token from previous line
asm_prev = self.OPCODES[cursorY - 1]
idx = asm_prev.getEndCursor()
self.cursor.moveAbsolute(idx, cursorY - 1)
else:
x, dy = asm.getNextCursor(cursorX, direction=Directions.Left)
self.cursor.move(-(cursorX - x), dy)
if direction == Directions.Right:
asm = self.OPCODES[cursorY]
x, dy = asm.getNextCursor(cursorX, direction=Directions.Right)
if cursorY == self.ROWS - 1 and dy > 0:
self.scroll(0, -1)
self.cursor.moveAbsolute(0, cursorY)
else:
if cursorY + dy >= len(self.OPCODES):
dy = 0
self.cursor.move(x - cursorX, dy)
if direction == Directions.Down:
if cursorY == self.ROWS - 1:
# move cursor to first token
self.scroll(0, -1)
self.cursor.moveAbsolute(0, cursorY)
else:
# move next line, to nearest token on columns
if cursorY + 1 < len(self.OPCODES):
asm = self.OPCODES[cursorY + 1]
x = asm.getNearestCursor(cursorX)
self.cursor.moveAbsolute(x, cursorY + 1)
if direction == Directions.Up:
if cursorY == 0:
# move cursor to first token
self.scroll(0, 1)
self.cursor.moveAbsolute(0, cursorY)
else:
# move next line, to nearest token on columns
asm = self.OPCODES[cursorY - 1]
x = asm.getNearestCursor(cursorX)
self.cursor.moveAbsolute(x, cursorY - 1)
if direction == Directions.End:
pass
if direction == Directions.Home:
self.cursor.moveAbsolute(0, 0)
if direction == Directions.CtrlHome:
self.goTo(0)
if direction == Directions.CtrlEnd:
self.dataModel.slideToLastPage()
self.draw(refresh=True)
self.cursor.moveAbsolute(self.COLUMNS - 1, self.ROWS - 1)
def _followBranch(self):
cursorX, cursorY = self.cursor.getPosition()
asm = self.OPCODES[cursorY]
if asm.isBranch():
value = asm.branchAddress()
if value:
fofs = self.plugin.disasmVAtoFA(value)
if fofs is not None:
rowOfs = self._getOffsetOfRow(cursorY)
if rowOfs is not None:
self.FlowHistory.append(rowOfs + self.dataModel.getOffset())
self.goTo(fofs)
def _followBranchHistory(self):
if len(self.FlowHistory) > 0:
offset = self.FlowHistory[-1]
del self.FlowHistory[-1]
self.goTo(offset)
def handleKeyEvent(self, modifiers, key, event=None):
if event.type() == QtCore.QEvent.KeyRelease:
if key == QtCore.Qt.Key_Shift:
self.stopSelection()
return True
if event.type() == QtCore.QEvent.KeyPress:
if modifiers == QtCore.Qt.ShiftModifier:
keys = [QtCore.Qt.Key_Right, QtCore.Qt.Key_Left, QtCore.Qt.Key_Down, QtCore.Qt.Key_Up,
QtCore.Qt.Key_End, QtCore.Qt.Key_Home]
if key in keys:
self.startSelection()
if modifiers == QtCore.Qt.ControlModifier:
if key == QtCore.Qt.Key_Right:
self.dataModel.slide(1)
self.addop((self.scroll, -1, 0))
if key == QtCore.Qt.Key_Left:
self.dataModel.slide(-1)
self.addop((self.scroll, 1, 0))
if key == QtCore.Qt.Key_Down:
self.addop((self.scroll, 0, -1))
self.addop((self.draw,))
if key == QtCore.Qt.Key_Up:
self.addop((self.scroll, 0, 1))
self.addop((self.draw,))
if key == QtCore.Qt.Key_End:
# not supported
pass
if key == QtCore.Qt.Key_Home:
self.moveCursor(Directions.CtrlHome)
self.addop((self.draw,))
# self.draw()
return True
else: # elif modifiers == QtCore.Qt.NoModifier:
if key == QtCore.Qt.Key_Escape:
self.selector.resetSelections()
self.addop((self.draw,))
if key == QtCore.Qt.Key_Left:
self.moveCursor(Directions.Left)
self.addop((self.draw,))
# self.draw()
if key == QtCore.Qt.Key_Right:
self.moveCursor(Directions.Right)
self.addop((self.draw,))
# self.draw()
if key == QtCore.Qt.Key_Down:
self.moveCursor(Directions.Down)
self.addop((self.draw,))
# self.draw()
if key == QtCore.Qt.Key_End:
self.moveCursor(Directions.End)
self.addop((self.draw,))
# self.draw()
if key == QtCore.Qt.Key_Home:
self.moveCursor(Directions.Home)
self.addop((self.draw,))
# self.draw()
if key == QtCore.Qt.Key_Up:
self.moveCursor(Directions.Up)
self.addop((self.draw,))
# self.draw()
if key == QtCore.Qt.Key_PageDown:
self.addop((self.scrollPages, 1))
self.addop((self.draw,))
if key == QtCore.Qt.Key_PageUp:
self.addop((self.scrollPages, -1))
self.addop((self.draw,))
if key == QtCore.Qt.Key_Return:
self.addop((self._followBranch,))
self.addop((self.draw,))
if key == QtCore.Qt.Key_Escape:
self.addop((self._followBranchHistory,))
self.addop((self.draw,))
return True
return False
def addop(self, t):
self.Ops.append(t)
def getHeaderInfo(self):
return 'Disasm listing'
``` |
{
"source": "jinchihe/fairing",
"score": 2
} |
#### File: examples/simple/main.py
```python
import os
import fairing
import tensorflow as tf
# Setting up Google Container Registry (GCR) for storing output containers
# You can use any docker container registry instead of GCR
GCP_PROJECT = fairing.cloud.gcp.guess_project_name()
DOCKER_REGISTRY = 'gcr.io/{}/fairing-job'.format(GCP_PROJECT)
fairing.config.set_builder('append',base_image='tensorflow/tensorflow:1.13.1-py3', registry=DOCKER_REGISTRY, push=True)
fairing.config.set_deployer('job')
def train():
hostname = tf.constant(os.environ['HOSTNAME'])
sess = tf.Session()
print('Hostname: ', sess.run(hostname).decode('utf-8'))
if __name__ == '__main__':
remote_train = fairing.config.fn(train)
remote_train()
```
#### File: builders/append/append.py
```python
from timeit import default_timer as timer
import httplib2
import os
import logging
import io
import tarfile
from fairing.builders.base_builder import BaseBuilder
from fairing.constants import constants
from docker import APIClient
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import append
from containerregistry.client.v2_2 import docker_image as v2_2_image
from containerregistry.client.v2_2.save_ import tarball
from containerregistry.client.v2_2 import docker_session
from containerregistry.transport import transport_pool
from containerregistry.transform.v2_2 import metadata
logger = logging.getLogger(__name__)
class AppendBuilder(BaseBuilder):
"""Builds a docker image by appending a new layer tarball to an existing
base image. Does not require docker and runs in userspace.
Args:
registry {str} -- Registry to push image to. Required.
Example: gcr.io/kubeflow-images (default: {None})
base_image {str} -- Base image to use for the image build (default:
{constants.DEFAULT_BASE_IMAGE})
preprocessor {BasePreProcessor} -- Preprocessor to use to modify inputs
before sending them to docker build
push {bool} -- Whether or not to push the image to the registry
"""
def __init__(self,
registry=None,
image_name=constants.DEFAULT_IMAGE_NAME,
base_image=constants.DEFAULT_BASE_IMAGE,
push=True,
preprocessor=None):
super().__init__(
registry=registry,
image_name=image_name,
base_image=base_image,
push=push,
preprocessor=preprocessor,
)
def build(self):
"""Will be called when the build needs to start"""
transport = transport_pool.Http(httplib2.Http)
src = docker_name.Tag(self.base_image, strict=False)
logger.warn("Building image using Append builder...")
start = timer()
new_img = self._build(transport, src)
end = timer()
logger.warn("Image successfully built in {}s.".format(end-start))
dst = docker_name.Tag(
self.full_image_name(self.context_hash), strict=False)
if self.push:
self.timed_push(transport, src, new_img, dst)
else:
# TODO(r2d4):
# Load image into local daemon. This wouldn't provide any speedup
# over using the docker daemon directly.
pass
def _build(self, transport, src):
file, hash = self.preprocessor.context_tar_gz()
self.context_file, self.context_hash = file, hash
self.image_tag = self.full_image_name(self.context_hash)
creds = docker_creds.DefaultKeychain.Resolve(src)
with v2_2_image.FromRegistry(src, creds, transport) as src_image:
with open(self.context_file, 'rb') as f:
new_img = append.Layer(src_image, f.read(),
overrides=metadata.Overrides(cmd=self.preprocessor.get_command(),
user='0',
env={"FAIRING_RUNTIME": "1"}))
return new_img
def _push(self, transport, src, img, dst):
creds = docker_creds.DefaultKeychain.Resolve(dst)
with docker_session.Push(
dst, creds, transport, mount=[src.as_repository()]) as session:
logger.warn("Uploading {}".format(self.image_tag))
session.upload(img)
os.remove(self.context_file)
def timed_push(self, transport, src, img, dst):
logger.warn("Pushing image {}...".format(self.image_tag))
start = timer()
self._push(transport, src, img, dst)
end = timer()
logger.warn(
"Pushed image {} in {}s.".format(self.image_tag, end-start))
```
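A minimal usage sketch for the builder above. The registry value and `my_preprocessor` are placeholders — any fairing preprocessor exposing `context_tar_gz()` and `get_command()` (as used by `_build`) would do — and pushing assumes registry credentials are available locally; this is not the project's documented entry point, just an illustration of the constructor arguments.
```python
# Hypothetical driver for AppendBuilder; `my_preprocessor` is a placeholder for a
# fairing preprocessor that provides context_tar_gz() and get_command().
from fairing.builders.append.append import AppendBuilder

builder = AppendBuilder(
    registry='gcr.io/my-project/fairing-job',       # assumed registry, replace with yours
    base_image='tensorflow/tensorflow:1.13.1-py3',
    push=True,
    preprocessor=my_preprocessor,
)
builder.build()  # appends the context tarball as a new layer and pushes the result
```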
#### File: deployers/tfjob/tfjob.py
```python
from kubernetes import client as k8s_client
from fairing.deployers.job.job import Job
from fairing.kubernetes.manager import TF_JOB_VERSION
DEFAULT_JOB_NAME = 'fairing-tfjob-'
DEPLOYER_TYPE = 'tfjob'
class TfJob(Job):
def __init__(self, namespace=None, worker_count=1, ps_count=0,
chief_count=1, runs=1, job_name=DEFAULT_JOB_NAME, stream_log=True, labels=None,
pod_spec_mutators=None):
super(TfJob, self).__init__(namespace, runs, job_name=job_name, stream_log=stream_log,
deployer_type=DEPLOYER_TYPE, labels=labels,
pod_spec_mutators=pod_spec_mutators)
self.distribution = {
'Worker': worker_count,
'PS': ps_count,
'Chief': chief_count
}
def create_resource(self):
self.created_tfjob = self.backend.create_tf_job(self.namespace, self.deployment_spec)
return self.created_tfjob['metadata']['name']
def generate_deployment_spec(self, pod_template_spec):
"""Returns a TFJob template"""
self.set_container_name(pod_template_spec)
worker_replica_spec = {}
worker_replica_spec['replicas'] = self.distribution['Worker']
worker_replica_spec['template'] = pod_template_spec
ps_replica_spec = {}
ps_replica_spec['replicas'] = self.distribution.get('PS', 0)
ps_replica_spec['template'] = pod_template_spec
chief_replica_spec = {}
chief_replica_spec['replicas'] = self.distribution.get('Chief', 0)
chief_replica_spec['template'] = pod_template_spec
spec = {}
spec['tfReplicaSpecs'] = {}
spec['tfReplicaSpecs']['Worker'] = worker_replica_spec
if chief_replica_spec['replicas'] > 0:
spec['tfReplicaSpecs']['Chief'] = chief_replica_spec
if ps_replica_spec['replicas'] > 0:
spec['tfReplicaSpecs']['PS'] = ps_replica_spec
tf_job = {}
tf_job['kind'] = 'TFJob'
tf_job['apiVersion'] = 'kubeflow.org/' + TF_JOB_VERSION
tf_job['metadata'] = k8s_client.V1ObjectMeta(generate_name=self.job_name)
tf_job['spec'] = spec
return tf_job
def set_container_name(self, pod_template_spec):
"""Sets the name of the main container to `tensorflow`.
This is required for TfJobs"""
pod_template_spec.spec.containers[0].name = 'tensorflow'
def get_logs(self):
name = self.created_tfjob['metadata']['name']
namespace = self.created_tfjob['metadata']['namespace']
labels = {
'tf-replica-index': '0',
'tf-replica-type': 'worker',
'tf-job-name': name
}
self.backend.log(name, namespace, labels)
```
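To make the replica layout concrete, here is a hedged sketch that feeds a minimal pod template through `generate_deployment_spec`. Constructing `TfJob` goes through the generic `Job` deployer, so this assumes a reachable Kubernetes configuration; the image name is illustrative.
```python
# Hedged sketch: assumes the parent Job deployer can initialize its Kubernetes
# backend (kubeconfig or in-cluster config); the image is a placeholder.
from kubernetes import client as k8s_client
from fairing.deployers.tfjob.tfjob import TfJob

pod_template = k8s_client.V1PodTemplateSpec(
    spec=k8s_client.V1PodSpec(
        containers=[k8s_client.V1Container(name='main',
                                           image='gcr.io/my-project/train:latest')],
        restart_policy='Never',
    )
)

deployer = TfJob(namespace='kubeflow', worker_count=2, ps_count=1, chief_count=1)
tf_job = deployer.generate_deployment_spec(pod_template)
# The container is renamed to 'tensorflow' and Worker/Chief/PS replica specs are filled in.
print(tf_job['kind'], sorted(tf_job['spec']['tfReplicaSpecs'].keys()))
```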
#### File: integration/gcp/test_running_in_notebooks.py
```python
import pytest
import fairing
import sys
import io
import tempfile
import random
import papermill
import os
def execute_notebook(notebook_path):
temp_dir = tempfile.mkdtemp()
notebook_output_path = os.path.join(temp_dir,"out.ipynb")
papermill.execute_notebook(notebook_path, notebook_output_path, cwd=os.path.dirname(notebook_path))
return notebook_output_path
def run_notebook_test(notebook_path, expected_messages):
output_path = execute_notebook(notebook_path)
actual_output = open(output_path, 'r').read()
# TODO (karthikv2k): use something like https://github.com/nteract/scrapbook
# for reading notebooks
for expected_message in expected_messages:
assert expected_message in actual_output
def test_xgboost_highlevel_apis():
file_dir = os.path.dirname(__file__)
notebook_rel_path = "../../../examples/prediction/xgboost-high-level-apis.ipynb"
notebook_abs_path = os.path.normpath(os.path.join(file_dir, notebook_rel_path))
# TODO (karthikv2k): find a better way to test notebook execution success
expected_messages = [
"Model export success: trained_ames_model.dat", #KF training
"Access job logs at the following URL:", #GCP managed submission success
"Prediction endpoint: http", #create endpoint success
]
run_notebook_test(notebook_abs_path, expected_messages)
def test_lightgbm():
file_dir = os.path.dirname(__file__)
notebook_rel_path = "../../../examples/lightgbm/distributed-training.ipynb"
notebook_abs_path = os.path.normpath(os.path.join(file_dir, notebook_rel_path))
# TODO (karthikv2k): find a better way to test notebook execution success
expected_messages = [
"Copying gs://fairing-lightgbm/regression-example/regression.train.weight",
"[LightGBM] [Info] Finished initializing network", #dist training setup
"[LightGBM] [Info] Iteration:10, valid_1 l2 : 0.2",
"[LightGBM] [Info] Finished training",
"Prediction mean: 0.5",
", count: 500"
]
run_notebook_test(notebook_abs_path, expected_messages)
``` |
{
"source": "jinchihe/kfp-tekton",
"score": 2
} |
#### File: kfp_tekton/compiler/_op_to_template.py
```python
from collections import OrderedDict
from kfp.compiler._k8s_helper import convert_k8s_obj_to_json
from kfp.compiler._op_to_template import _process_obj, _inputs_to_json, _outputs_to_json
from kfp import dsl
from kfp.dsl._container_op import BaseOp
from .. import tekton_api_version
class literal_str(str):
"""Literal string class for pyyaml
    Literal string class is used for converting strings with newlines into
    YAML's literal string format with '|'. In pyyaml, literal string
conversion is not natively supported in the default dumper.
Therefore, we need to define this class as part of the dumper
before compiling it into yaml.
"""
pass
def _process_base_ops(op: BaseOp):
"""Recursively go through the attrs listed in `attrs_with_pipelineparams`
and sanitize and replace pipeline params with template var string.
Returns a processed `BaseOp`.
NOTE this is an in-place update to `BaseOp`'s attributes (i.e. the ones
specified in `attrs_with_pipelineparams`, all `PipelineParam` are replaced
with the corresponding template variable strings).
Args:
op {BaseOp}: class that inherits from BaseOp
Returns:
BaseOp
"""
# map param's (unsanitized pattern or serialized str pattern) -> input param var str
map_to_tmpl_var = {
(param.pattern or str(param)): '$(inputs.params.%s)' % param.full_name # Tekton change
for param in op.inputs
}
# process all attr with pipelineParams except inputs and outputs parameters
for key in op.attrs_with_pipelineparams:
setattr(op, key, _process_obj(getattr(op, key), map_to_tmpl_var))
return op
def _op_to_template(op: BaseOp):
"""Generate template given an operator inherited from BaseOp."""
# NOTE in-place update to BaseOp
# replace all PipelineParams with template var strings
processed_op = _process_base_ops(op)
if isinstance(op, dsl.ContainerOp):
# default output artifacts
output_artifact_paths = OrderedDict(op.output_artifact_paths)
# print(op.output_artifact_paths)
# This should have been as easy as output_artifact_paths.update(op.file_outputs), but the _outputs_to_json function changes the output names and we must do the same here, so that the names are the same
output_artifact_paths.update(sorted(((param.full_name, processed_op.file_outputs[param.name]) for param in processed_op.outputs.values()), key=lambda x: x[0]))
output_artifacts = [
# convert_k8s_obj_to_json(
# ArtifactLocation.create_artifact_for_s3(
# op.artifact_location,
# name=name,
# path=path,
# key='runs/{{workflow.uid}}/{{pod.name}}/' + name + '.tgz'))
# for name, path in output_artifact_paths.items()
]
# workflow template
container = convert_k8s_obj_to_json(
processed_op.container
)
step = {'name': processed_op.name}
step.update(container)
template = {
'apiVersion': tekton_api_version,
'kind': 'Task',
'metadata': {'name': processed_op.name},
'spec': {
'steps': [step]
}
}
elif isinstance(op, dsl.ResourceOp):
# no output artifacts
output_artifacts = []
# task template
template = {
'apiVersion': tekton_api_version,
'kind': 'Task',
'metadata': {'name': processed_op.name},
'spec': {
"params": [
{
"description": "Action on the resource",
"name": "action",
"type": "string"
},
{
"default": "strategic",
"description": "Merge strategy when using action patch",
"name": "merge-strategy",
"type": "string"
},
{
"description": "Content of the resource to deploy",
"name": "manifest",
"type": "string"
},
{
"default": "",
"description": "An express to retrieval data from resource.",
"name": "output",
"type": "string"
},
{
"default": "",
"description": "A label selector express to decide if the action on resource is success.",
"name": "success-condition",
"type": "string"
},
{
"default": "",
"description": "A label selector express to decide if the action on resource is failure.",
"name": "failure-condition",
"type": "string"
},
{
"default": "index.docker.io/fenglixa/kubeclient:v0.0.1", # Todo: The image need to be replaced, once there are official images from tekton
"description": "Kubectl wrapper image",
"name": "image",
"type": "string"
},
{
"default": "false",
"description": "Enable set owner reference for created resource.",
"name": "set-ownerreference",
"type": "string"
}
],
'steps': [
{
"args": [
"--action=$(params.action)",
"--merge-strategy=$(params.merge-strategy)",
"--manifest=$(params.manifest)",
"--output=$(params.output)",
"--success-condition=$(params.success-condition)",
"--failure-condition=$(params.failure-condition)",
"--set-ownerreference=$(params.set-ownerreference)"
],
"image": "$(params.image)",
"name": processed_op.name,
"resources": {}
}
]
}
}
# initContainers
if processed_op.init_containers:
steps = processed_op.init_containers.copy()
steps.extend(template['spec']['steps'])
template['spec']['steps'] = steps
# inputs
input_artifact_paths = processed_op.input_artifact_paths if isinstance(processed_op, dsl.ContainerOp) else None
artifact_arguments = processed_op.artifact_arguments if isinstance(processed_op, dsl.ContainerOp) else None
inputs = _inputs_to_json(processed_op.inputs, input_artifact_paths, artifact_arguments)
if 'parameters' in inputs:
if isinstance(processed_op, dsl.ContainerOp):
template['spec']['params'] = inputs['parameters']
elif isinstance(op, dsl.ResourceOp):
template['spec']['params'].extend(inputs['parameters'])
elif 'artifacts' in inputs:
raise NotImplementedError("input artifacts are not yet implemented")
# outputs
if isinstance(op, dsl.ContainerOp):
op_outputs = processed_op.outputs
param_outputs = processed_op.file_outputs
elif isinstance(op, dsl.ResourceOp):
op_outputs = {}
param_outputs = {}
outputs_dict = _outputs_to_json(op, op_outputs, param_outputs, output_artifacts)
if outputs_dict:
"""
        Tekton results need to be under /tekton/results. If file output paths cannot be
        configured to /tekton/results, we need to create the copy step below to move
        file outputs to the Tekton destination. BusyBox is recommended for this small
        task because it is relatively lightweight compared to the ubuntu and bash images.
- image: busybox
name: copy-results
script: |
#!/bin/sh
set -exo pipefail
cp $LOCALPATH $(results.data.path);
"""
template['spec']['results'] = []
copy_results_step = {
'image': 'busybox',
'name': 'copy-results',
'script': '#!/bin/sh\nset -exo pipefail\n'
}
volume_mount_step_template = []
volume_template = []
mounted_paths = []
for name, path in param_outputs.items():
name = name.replace('_', '-') # replace '_' to '-' since tekton results doesn't support underscore
template['spec']['results'].append({
'name': name,
'description': path
})
# replace all occurrences of the output file path with the Tekton output parameter expression
need_copy_step = True
for s in template['spec']['steps']:
if 'command' in s:
commands = []
for c in s['command']:
if path in c:
c = c.replace(path, '$(results.%s.path)' % name)
need_copy_step = False
commands.append(c)
s['command'] = commands
if 'args' in s:
args = []
for a in s['args']:
if path in a:
a = a.replace(path, '$(results.%s.path)' % name)
need_copy_step = False
args.append(a)
s['args'] = args
# If file output path cannot be found/replaced, use emptyDir to copy it to the tekton/results path
if need_copy_step:
copy_results_step['script'] = copy_results_step['script'] + 'cp ' + path + ' $(results.%s.path);' % name + '\n'
mountPath = path.rsplit("/", 1)[0]
if mountPath not in mounted_paths:
volume_mount_step_template.append({'name': name, 'mountPath': path.rsplit("/", 1)[0]})
volume_template.append({'name': name, 'emptyDir': {}})
mounted_paths.append(mountPath)
if mounted_paths:
copy_results_step['script'] = literal_str(copy_results_step['script'])
template['spec']['steps'].append(copy_results_step)
template['spec']['stepTemplate'] = {}
template['spec']['stepTemplate']['volumeMounts'] = volume_mount_step_template
template['spec']['volumes'] = volume_template
# **********************************************************
# NOTE: the following features are still under development
# **********************************************************
# metadata
if processed_op.pod_annotations or processed_op.pod_labels:
template.setdefault('metadata', {}) # Tekton change, don't wipe out existing metadata
if processed_op.pod_annotations:
template['metadata']['annotations'] = processed_op.pod_annotations
if processed_op.pod_labels:
template['metadata']['labels'] = processed_op.pod_labels
# sidecars
if processed_op.sidecars:
template['spec']['sidecars'] = processed_op.sidecars
# volumes
if processed_op.volumes:
template['spec']['volumes'] = template['spec'].get('volume', []) + [convert_k8s_obj_to_json(volume) for volume in processed_op.volumes]
template['spec']['volumes'].sort(key=lambda x: x['name'])
# Display name
if processed_op.display_name:
template.setdefault('metadata', {}).setdefault('annotations', {})['pipelines.kubeflow.org/task_display_name'] = processed_op.display_name
if isinstance(op, dsl.ContainerOp) and op._metadata:
import json
template.setdefault('metadata', {}).setdefault('annotations', {})['pipelines.kubeflow.org/component_spec'] = json.dumps(op._metadata.to_dict(), sort_keys=True)
return template
``` |
{
"source": "jinchihe/kfserving",
"score": 2
} |
#### File: kfserving/kfserving/model.py
```python
from typing import List, Any
class KFModel(object):
def __init__(self, name: str):
self.name = name
self.ready = False
def load(self):
raise NotImplementedError
def preprocess(self, inputs: List) -> List:
raise NotImplementedError
def predict(self, inputs: List) -> List:
raise NotImplementedError
def postprocess(self, inputs: List) -> List:
raise NotImplementedError
# TODO return type TBD
def explain(self, inputs: List) -> Any:
raise NotImplementedError
# TODO return type TBD
def detectOutlier(self, inputs: List):
raise NotImplementedError
```
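A minimal sketch of how a concrete server would subclass `KFModel`; the doubling "model" and the class name are made up for illustration, only the `KFModel` interface comes from the file above.
```python
# Illustrative subclass of KFModel; DoublingModel and its behaviour are invented
# for the example, a real server would load and serve a trained artifact.
from typing import List
from kfserving.model import KFModel

class DoublingModel(KFModel):
    def load(self):
        # A real server would fetch weights here (e.g. from object storage).
        self.ready = True

    def predict(self, inputs: List) -> List:
        return [2 * x for x in inputs]

model = DoublingModel("demo-model")
model.load()
print(model.ready, model.predict([1, 2, 3]))  # True [2, 4, 6]
```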
#### File: kfserving/kfserving/storage.py
```python
import logging
import tempfile
import os
import re
from minio import Minio
from google.cloud import storage
from google.auth import exceptions
_GCS_PREFIX = "gs://"
_S3_PREFIX = "s3://"
_LOCAL_PREFIX = "file://"
class Storage(object):
@staticmethod
def download(uri: str) -> str:
logging.info("Copying contents of %s to local" % uri)
if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):
return Storage._download_local(uri)
temp_dir = tempfile.mkdtemp()
if uri.startswith(_GCS_PREFIX):
Storage._download_gcs(uri, temp_dir)
elif uri.startswith(_S3_PREFIX):
Storage._download_s3(uri, temp_dir)
else:
raise Exception("Cannot recognize storage type for " + uri +
"\n'%s', '%s', and '%s' are the current available storage type." %
(_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))
logging.info("Successfully copied %s to %s" % (uri, temp_dir))
return temp_dir
@staticmethod
def _download_s3(uri, temp_dir: str):
client = Storage._create_minio_client()
bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1)
bucket_name = bucket_args[0]
bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)
for obj in objects:
# Replace any prefix from the object key with temp_dir
subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/")
client.fget_object(bucket_name, obj.object_name, os.path.join(temp_dir, subdir_object_key))
@staticmethod
def _download_gcs(uri, temp_dir: str):
try:
storage_client = storage.Client()
except exceptions.DefaultCredentialsError as e:
storage_client = storage.Client.create_anonymous_client()
bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1)
bucket_name = bucket_args[0]
bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
bucket = storage_client.bucket(bucket_name)
blobs = bucket.list_blobs(prefix=bucket_path)
for blob in blobs:
# Replace any prefix from the object key with temp_dir
subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/")
# Create necessary subdirectory to store the object locally
if "/" in subdir_object_key:
local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0])
if not os.path.isdir(local_object_dir):
os.makedirs(local_object_dir, exist_ok=True)
blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))
@staticmethod
def _download_local(uri):
local_path = uri.replace(_LOCAL_PREFIX, "", 1)
if not os.path.exists(local_path):
raise Exception("Local path %s does not exist." % (uri))
return local_path
@staticmethod
def _create_minio_client():
# Remove possible http scheme for Minio
url = re.compile(r"https?://")
minioClient = Minio(url.sub("", os.getenv("S3_ENDPOINT", "")),
access_key=os.getenv("AWS_ACCESS_KEY_ID", ""),
secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""),
secure=True)
return minioClient
``` |
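A short, hedged sketch of calling the downloader above. The `gs://` URI is an illustrative placeholder, and the S3 branch additionally expects the `S3_ENDPOINT` / `AWS_*` environment variables read by `_create_minio_client`.
```python
# Illustrative use of Storage.download(); the bucket URI is a placeholder.
from kfserving.storage import Storage

model_dir = Storage.download("gs://my-bucket/models/my-model")  # hypothetical bucket
print("model copied to", model_dir)

# Local paths (with or without the file:// prefix) are passed through unchanged:
assert Storage.download("file:///tmp") == "/tmp"
```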
{
"source": "jinchihe/testing",
"score": 2
} |
#### File: kubeflow/tests/prow_artifacts_test.py
```python
import json
import os
import unittest
import mock
from kubeflow.testing import prow_artifacts
from google.cloud import storage # pylint: disable=no-name-in-module
class TestProw(unittest.TestCase):
@mock.patch("kubeflow.testing.prow_artifacts.time.time")
def testCreateStartedPresubmit(self, mock_time): # pylint: disable=no-self-use
"""Test create started for presubmit job."""
mock_time.return_value = 1000
os.environ["REPO_OWNER"] = "fake_org"
os.environ["REPO_NAME"] = "fake_name"
os.environ["PULL_PULL_SHA"] = "123abc"
expected = {
"timestamp": 1000,
"repos": {
"fake_org/fake_name": "123abc",
},
"metadata": {
"workflow1-ui": "http://argo",
},
}
ui_urls = {
"workflow1": "http://argo",
}
actual = prow_artifacts.create_started(ui_urls)
self.assertEqual(expected, json.loads(actual))
@mock.patch("kubeflow.testing.prow_artifacts.time.time")
def testCreateFinished(self, mock_time): # pylint: disable=no-self-use
"""Test create finished job."""
mock_time.return_value = 1000
workflow_phase = {
"wfA": "Succeeded"
}
test_urls = {
"wfA": "https://example.com",
}
expected = {
"timestamp": 1000,
"result": "FAILED",
"metadata": {
"wfA-phase": "Succeeded",
"wfA-ui": "https://example.com",
},
}
actual = prow_artifacts.create_finished(False, workflow_phase, test_urls)
self.assertEqual(expected, json.loads(actual))
@mock.patch("kubeflow.testing.prow_artifacts.util.run")
def testCopyArtifactsPresubmit(self, mock_run): # pylint: disable=no-self-use
"""Test copy artifacts to GCS."""
os.environ = {}
os.environ["REPO_OWNER"] = "fake_org"
os.environ["REPO_NAME"] = "fake_name"
os.environ["PULL_NUMBER"] = "72"
os.environ["BUILD_NUMBER"] = "100"
os.environ["PULL_PULL_SHA"] = "123abc"
os.environ["JOB_NAME"] = "kubeflow-presubmit"
os.environ["JOB_TYPE"] = "presubmit"
args = ["--artifacts_dir=/tmp/some/dir", "copy_artifacts",
"--bucket=some_bucket"]
prow_artifacts.main(args)
mock_run.assert_called_once_with(
["gsutil", "-m", "rsync", "-r", "/tmp/some/dir",
"gs://some_bucket/pr-logs/pull/fake_org_fake_name/72/kubeflow-presubmit"
"/100"],
)
def testCreateSymlink(self): # pylint: disable=no-self-use
gcs_client = mock.MagicMock(spec=storage.Client)
mock_bucket = mock.MagicMock(spec=storage.Bucket)
gcs_client.get_bucket.return_value = mock_bucket
mock_blob = mock.MagicMock(spec=storage.Blob)
mock_bucket.blob.return_value = mock_blob
# We can't add the decorator the instance method because that would
# interfere with creating gcs_client since storage.Client would then
# point to the mock and not the actual class.
with mock.patch("kubeflow.testing.prow_artifacts.storage"
".Client") as mock_client:
mock_client.return_value = gcs_client
os.environ["REPO_OWNER"] = "fake_org"
os.environ["REPO_NAME"] = "fake_name"
os.environ["PULL_NUMBER"] = "72"
os.environ["BUILD_NUMBER"] = "100"
os.environ["PULL_PULL_SHA"] = "123abc"
os.environ["JOB_NAME"] = "kubeflow-presubmit"
os.environ["JOB_TYPE"] = "presubmit"
args = ["--artifacts_dir=/tmp/some/dir", "create_pr_symlink",
"--bucket=some-bucket"]
prow_artifacts.main(args)
mock_blob.upload_from_string.assert_called_once_with(
"gs://some-bucket/pr-logs/pull/fake_org_fake_name/72"
"/kubeflow-presubmit/100")
@mock.patch("kubeflow.testing.test_util.get_num_failures")
@mock.patch("kubeflow.testing.prow_artifacts._get_actual_junit_files")
def testCheckNoErrorsSuccess(self, mock_get_junit, mock_get_failures):
# Verify that check no errors returns true when there are no errors
gcs_client = mock.MagicMock(spec=storage.Client)
artifacts_dir = "gs://some_dir"
mock_get_junit.return_value = set(["junit_1.xml"])
mock_get_failures.return_value = 0
self.assertTrue(prow_artifacts.check_no_errors(gcs_client, artifacts_dir))
@mock.patch("kubeflow.testing.test_util.get_num_failures")
@mock.patch("kubeflow.testing.prow_artifacts._get_actual_junit_files")
def testCheckNoErrorsFailure(self, mock_get_junit, mock_get_failures):
# Verify that check no errors returns false when a junit
# file reports an error.
gcs_client = mock.MagicMock(spec=storage.Client)
artifacts_dir = "gs://some_dir"
mock_get_junit.return_value = set(["junit_1.xml"])
mock_get_failures.return_value = 1
self.assertFalse(prow_artifacts.check_no_errors(gcs_client, artifacts_dir))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jinchuika/covid-modeler",
"score": 3
} |
#### File: modeler/models/linear_model.py
```python
from .base_model import BaseModel
import numpy as np
from sklearn.linear_model import LinearRegression
class LinearModel(BaseModel):
plot_name = 'Linear'
def train(self):
x, y = np.reshape(self.x_train, (-1, 1)), np.reshape(self.y_train, (-1, 1))
self.model = LinearRegression().fit(x, y)
self.is_trained = True
def predict(self):
y_pred = self.model.predict(self.x_pred.reshape(-1, 1))
self.y_pred = y_pred.reshape(y_pred.size)
self.is_predicted = True
```
#### File: covid-modeler/modeler/wrapper.py
```python
import os
from datetime import timedelta
import pandas as pd
import numpy as np
import chart_studio
import chart_studio.plotly as py
import plotly.graph_objects as go
from . import countries, models
class Modeler:
default_models = {
'linear': models.LinearModel,
'logistic': models.LogisticModel,
'exponential': models.ExponentialModel
}
processed_models = {}
record = ''
def __init__(self, country=None, predict_len=15, use_default_models=True, mode='notebook', output_folder='output', plot_mode='image', show_plot=False):
self.predict_len = predict_len
self.c = countries.CountryData()
if country is not None:
self.set_country(country)
if use_default_models:
self.models = self.default_models
# export options
if mode not in ('notebook', 'cli'):
raise RuntimeError('El modo debe ser `notebook` o `cli`')
self.mode = mode
self.output_folder = output_folder
self.plot_mode = plot_mode
self.show_plot = show_plot
def log(self, text):
self.record += text
def process(self):
self.record = ''
        if len(self.data[1]) >= 8:
current = self.data[1].astype(int)[-1]
lastweek = self.data[1].astype(int)[-8]
if current > lastweek:
self.log(f'Resultados para *{self.country_name}*')
self.log('\n** Basado en los datos de la última semana **\n')
self.log(f'\n\tCasos confirmados en {self.data[2][-1]} \t {current}')
self.log(f'\n\tCasos confirmados en {self.data[2][-8]} \t {lastweek}')
ratio = current / lastweek
self.log(f'\n\tProporción: {round(ratio, 2)}')
self.log(f'\n\tIncremento semanal: {round( 100 * (ratio - 1), 1)}%')
dailypercentchange = round( 100 * (pow(ratio, 1/7) - 1), 1)
self.log(f'\n\tIncremento diario: {dailypercentchange}% por día')
recentdbltime = round( 7 * np.log(2) / np.log(ratio), 1)
self.log(f'\n\tTiempo que tarda en duplicarse (al ritmo actual): {recentdbltime} días')
for name, model in self.models.items():
self.processed_models[name] = model(
x_train=self.data[0],
y_train=self.data[1],
predict_len=self.predict_len,
start_date=self.data[2][0]
)
self.create_record()
self.plot()
self.export()
def set_country(self, country):
self.data = self.c.get_country(country)
self.country_name = country
def create_record(self):
best_r2 = 0
best_model = ''
for name, model in self.processed_models.items():
self.log(model.record)
if hasattr(model, 'r2') and model.r2 > best_r2:
best_r2 = model.r2
best_model = model.plot_name
if best_r2 > 0:
self.log(f"\nMejor modelo: {best_model} (R2 = {best_r2})")
def plot(self):
plot_data = []
end_date = pd.to_datetime(self.data[2][0]).date() + timedelta(days=len(self.data[2]))
original_data = go.Scatter(
x=pd.date_range(start=str(self.data[2][0]), end=end_date),
y=self.data[1],
mode='markers',
name='Casos confirmados'
)
plot_data.append(original_data)
for name, model in self.processed_models.items():
plot_data.append(model.chart)
layout = dict(
title = self.country_name,
xaxis_type='date'
)
self.fig = go.Figure(data=plot_data, layout=layout)
def export(self):
if self.mode == 'notebook':
print(self.record)
self.fig.show()
return
        # Create the destination folder
if not os.path.exists(self.output_folder):
os.mkdir(self.output_folder)
with open(os.path.join(self.output_folder, f'results_{self.country_name}.txt'), 'w', encoding='utf8') as output_file:
print(self.record)
output_file.write(self.record)
print("******************************************")
print(f"Resultados escritos en {output_file.name}")
print("******************************************")
# export the plot
if self.plot_mode == 'image':
self.export_image_plot()
if self.plot_mode == 'html':
self.export_html_plot()
def export_image_plot(self):
try:
file_name = os.path.join(self.output_folder, f'results_{self.country_name}.png')
self.fig.write_image(os.path.join(self.output_folder, f'results_{self.country_name}.png'))
print(f'El gráfico fue exportado en {file_name}')
if self.show_plot:
self.fig.show()
except ValueError as e:
print("Hubo un error al exportar la imagen")
print("Este error probablemente se debe a que se requiere la instalación de Orca para exportar imágenes")
print("La guía de instalación se encuentra en: https://github.com/plotly/orca")
def export_html_plot(self):
file_name = os.path.join(self.output_folder, f'results_{self.country_name}.html')
self.fig.write_html(file_name)
print(f'El gráfico fue exportado en {file_name}')
if self.show_plot:
self.fig.show()
``` |
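A hedged sketch of driving the `Modeler` end to end from a script; 'Guatemala' is an illustrative country name and must match whatever `countries.CountryData()` actually exposes, and fetching the case data requires network access.
```python
# Illustrative driver; the country name is a placeholder that must exist in the
# CountryData source, and ./output is created by export() if it is missing.
from modeler.wrapper import Modeler

m = Modeler(country='Guatemala', predict_len=15, mode='cli',
            output_folder='output', plot_mode='html', show_plot=False)
m.process()  # writes results_Guatemala.txt and results_Guatemala.html under ./output
```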
{
"source": "jinchuika/fsp-paypal",
"score": 3
} |
#### File: pagos/tests/test_forms.py
```python
import pytest
from mixer.backend.django import mixer
from pagos.forms import *
from pagos.models import *
pytestmark = pytest.mark.django_db
class TestDonationForm:
def test_form(self):
form = DonationForm(data={})
card_type = mixer.blend('pagos.CardType', alias='visa', card_type='visa')
        assert form.is_valid() is False, 'Should be invalid if no data'
form = DonationForm(data={
'first_name': 'Joe',
'last_name': 'Shopper',
'mail': '<EMAIL>',
'card_type': "asd",
'number': '4417119669820331',
'expire_month': "11",
'expire_year': '2018',
'cvv2': '874',
'total': '2.00'})
assert 'card_type' in form.errors, 'Should be a valid card type'
form = DonationForm(data={
'first_name': "Joe",
'last_name': "Shopper",
'mail': '<EMAIL>',
'card_type': card_type.id,
'number': "4417119669820331",
'expire_month': "11",
'expire_year': "2018",
'cvv2': "874",
'total': "2.00"})
assert form.is_valid() is True, 'Should be valid'
```
#### File: fsp-paypal/pagos/views.py
```python
from django.http import HttpResponse
from django.views.generic import TemplateView, DetailView
from django.views.generic.edit import FormView
from .forms import DonationForm
from .models import CardType
import json
class PagoView(FormView):
form_class = DonationForm
template_name = 'pago.html'
success_url = 'http://funsepa.org/cms/es/gracias/'
def get_context_data(self, **kwargs):
context = super(PagoView, self).get_context_data(**kwargs)
david = self.request.GET.get('david', None)
navidad = self.request.GET.get('navidad', None)
if david:
context['david'] = True
if navidad:
context['navidad'] = True
return context
def form_valid(self, form):
instance = super(PagoView, self).form_valid(form)
return instance
class PagoDone(TemplateView):
template_name = 'done.html'
class CardTypeView(DetailView):
model = CardType
slug_field = 'card_type'
def get(self, *args, **kwargs):
card_type = CardType.objects.filter(card_type=kwargs.pop('slug')).first()
if card_type:
response = {
'id': card_type.id,
'card_type': card_type.card_type,
'name': card_type.alias}
else:
response = None
return HttpResponse(json.dumps(response))
``` |
{
"source": "jinchuika/u-seminario",
"score": 2
} |
#### File: u-seminario/main/models.py
```python
import os
from django.db import models
from django.db.models import Q
from django.urls import reverse_lazy
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator
from datetime import timedelta, date, time
from django.utils import timezone
from easy_thumbnails.fields import ThumbnailerImageField
class Perfil(models.Model):
GENERO_CHOICES = (
('M', 'Masculino'),
('F', 'Femenino'),)
user = models.OneToOneField(User, on_delete=models.CASCADE)
dpi = models.CharField(max_length=20, unique=True)
public = models.BooleanField(default=True)
genero = models.CharField(max_length=1, choices=GENERO_CHOICES, default='M')
fecha_nacimiento = models.DateField(null=True, blank=True)
direccion = models.CharField(max_length=150, null=True, blank=True)
foto = ThumbnailerImageField(
upload_to="perfil_usuario",
null=True,
blank=True,
editable=True,)
objects = models.Manager()
def get_nombre(self):
return self.user.first_name
nombre = property(get_nombre)
def get_apellido(self):
return self.user.last_name
apellido = property(get_apellido)
def get_absolute_url(self):
return reverse_lazy('perfil', kwargs={'pk': self.id})
class Meta:
verbose_name = 'perfil'
verbose_name_plural = 'perfiles'
def __str__(self):
return self.nombre + " " + self.apellido
class Producto(models.Model):
nombre = models.CharField(max_length=150)
def get_absolute_url(self):
return reverse_lazy('producto_detail', kwargs={'pk': self.id})
    def precio_compra_actual(self, fecha=None):
        fecha = fecha or date.today()
        ultimo_precio = CompraPrecio.objects.filter(fecha__lte=fecha, producto=self).order_by('fecha').reverse()
if ultimo_precio.count() < 1:
return None
else:
return ultimo_precio[0]
precio_compra_actual = property(precio_compra_actual)
    def precio_venta_actual(self, fecha=None):
        fecha = fecha or date.today()
        ultimo_precio = VentaPrecio.objects.filter(fecha__lte=fecha, producto=self).order_by('fecha').reverse()
if ultimo_precio.count() < 1:
return None
else:
return ultimo_precio[0]
precio_venta_actual = property(precio_venta_actual)
def compras(self):
compra_list = Compra.objects.filter(producto=self)
if compra_list.count() < 1:
return 0
else:
return sum(compra.cantidad for compra in compra_list)
compras = property(compras)
def ventas(self):
venta_list = VentaDetalle.objects.filter(producto=self)
if venta_list.count() < 1:
return 0
else:
return sum(venta.cantidad for venta in venta_list)
ventas = property(ventas)
def existencia(self):
return self.compras - self.ventas
existencia = property(existencia)
def __str__(self):
return str(self.nombre)
class Compra(models.Model):
producto = models.ForeignKey('Producto', related_name='compra')
cantidad = models.IntegerField()
fecha = models.DateField(default=timezone.now)
def get_precio(self):
precio = CompraPrecio.objects.filter(producto=self.producto, fecha__lte=self.fecha).order_by('fecha').last()
if precio:
return precio.precio
else:
return 0
precio = property(get_precio)
def total(self):
return self.cantidad * self.precio
def __str__(self):
return "No. " + str(self.id) + " - (" + str(self.fecha) + ")"
class CompraPrecio(models.Model):
producto = models.ForeignKey('Producto', related_name='precio_compra')
precio = models.DecimalField(max_digits=9, decimal_places=2)
fecha = models.DateField(default=timezone.now)
class Meta:
unique_together = ('producto', 'fecha')
def __str__(self):
return str(self.producto)+ " - " + str(self.fecha) + " - " + str(self.precio)
class Venta(models.Model):
vendedor = models.ForeignKey('Perfil', related_name='venta')
fecha = models.DateField(default=timezone.now)
def __str__(self):
return "No. " + str(self.id) + " - (" + str(self.fecha) + ")"
class VentaPrecio(models.Model):
producto = models.ForeignKey('Producto', related_name='precio_venta')
precio = models.DecimalField(max_digits=9, decimal_places=2)
fecha = models.DateField(default=timezone.now)
class Meta:
unique_together = ('producto', 'fecha')
def __str__(self):
return str(self.precio)
class VentaDetalle(models.Model):
venta = models.ForeignKey('Venta', related_name='detalle_venta')
producto = models.ForeignKey('Producto', related_name='venta')
cantidad = models.IntegerField()
def get_precio(self):
precio = VentaPrecio.objects.filter(producto=self.producto, fecha__lte=self.venta.fecha).order_by('fecha').last()
if precio:
return precio.precio
else:
return 0
precio = property(get_precio)
def total(self):
return self.cantidad * self.precio
def __str__(self):
return str(self.producto) + " - " + str(self.cantidad)
```
#### File: u-seminario/main/views.py
```python
import json
from django.http import HttpResponse
from django.db.models import Count, Sum
from django.shortcuts import render, redirect, get_object_or_404
from main.models import *
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView, TemplateView
from django.views.generic.edit import CreateView, UpdateView
from main.forms import *
from main.mixins import VentaContextMixin
from main.bi import *
from braces.views import LoginRequiredMixin
def compra_all(request):
compra_list = Compra.objects.all()
context = {
'compra_list': compra_list
}
return render(request, 'compra/all.html', context)
class ProductoList(LoginRequiredMixin, ListView):
model = Producto
template_name = 'producto/all.html'
class ProductoView(LoginRequiredMixin, CreateView):
model = Producto
form_class = ProductoForm
template_name = 'producto/add.html'
success_url = reverse_lazy('producto_add')
def get_context_data(self, **kwargs):
context = super(ProductoView, self).get_context_data(**kwargs)
context['producto_list'] = Producto.objects.all()
return context
class ProductoDetail(LoginRequiredMixin, DetailView):
model = Producto
template_name = 'producto/detail.html'
class PrecioVentaAdd(LoginRequiredMixin, CreateView):
model = VentaPrecio
form_class = VentaPrecioForm
template_name = 'venta/precio_add.html'
success_url = reverse_lazy('producto_add')
class PrecioCompraAdd(CreateView):
model = CompraPrecio
form_class = CompraPrecioForm
template_name = 'compra/precio_add.html'
success_url = reverse_lazy('producto_add')
class CompraAdd(CreateView):
model = Compra
form_class = CompraForm
template_name = 'compra/form.html'
success_url = reverse_lazy('compra_all')
class CompraList(LoginRequiredMixin, ListView):
model = Compra
template_name = 'compra/all.html'
class VentaAdd(VentaContextMixin, CreateView):
model = Venta
template_name = 'venta/add.html'
success_url = reverse_lazy('producto_add')
form_class = VentaForm
def form_valid(self, form):
form.instance.vendedor = self.request.user.perfil
return super(VentaAdd, self).form_valid(form)
class VentaList(LoginRequiredMixin, ListView):
model = VentaDetalle
template_name = 'venta/all.html'
class PerfilView(LoginRequiredMixin, UpdateView):
template_name = 'user/perfil.html'
form_class = PerfilForm
model = Perfil
class Analytics(LoginRequiredMixin, TemplateView):
template_name = "bi/home.html"
def get_context_data(self, **kwargs):
context = super(Analytics, self).get_context_data(**kwargs)
resumen = []
mes = Calendario().get_mes_actual()
for dia in mes:
resumen.append({
'dia': dia,
'ventas': Venta.objects.filter(fecha=dia),
'compras': Compra.objects.filter(fecha=dia),
})
context['resumen'] = resumen
return context
class AnalyticsApi(LoginRequiredMixin, TemplateView):
def get(self, request):
qs = self.analyze_producto(request.GET.get('inicio', None), request.GET.get('fin', None))
qs_vendedor = self.analyze_vendedor(request.GET.get('inicio', None), request.GET.get('fin', None))
return HttpResponse(
json.dumps({
'productos': qs,
'vendedores': qs_vendedor})
)
def analyze_producto(self, fecha_inicio=None, fecha_fin=None):
venta_list = Venta.objects.all()
compra_list = Compra.objects.all()
if fecha_inicio:
venta_list = venta_list.filter(fecha__gte=fecha_inicio)
compra_list = compra_list.filter(fecha__gte=fecha_inicio)
if fecha_fin:
venta_list = venta_list.filter(fecha__lte=fecha_fin)
compra_list = compra_list.filter(fecha__lte=fecha_fin)
producto_list = Producto.objects.all()
lista = []
for producto in producto_list:
lista.append({
'producto': producto.nombre,
'cantidad': sum(venta.cantidad for venta in producto.venta.filter(venta__in=venta_list)),
'cantidad_compra': sum(compra.cantidad for compra in producto.compra.filter(fecha__range=[fecha_inicio, fecha_fin])),
})
return lista
def analyze_vendedor(self, fecha_inicio=None, fecha_fin=None):
vendedor_list = Perfil.objects.all()
lista = []
for vendedor in vendedor_list:
venta_list = Venta.objects.filter(vendedor=vendedor, fecha__range=[fecha_inicio, fecha_fin])
lista.append({
'vendedor': str(vendedor),
'cantidad': sum(venta.cantidad for venta in VentaDetalle.objects.filter(venta__in=venta_list))
})
return lista
``` |
{
"source": "jinchuuriki91/fyle-sdk-py",
"score": 3
} |
#### File: fylesdk/apis/jobs.py
```python
from typing import Dict
from .api_base import ApiBase
class Jobs(ApiBase):
"""Class for Jobs APIs."""
JOBS_URL = '/v2/jobs'
    def body_template(self, callback_url, callback_method, job_description, job_data_url, object_id, hours,
start_datetime, org_user_id, payload):
"""
:param org_user_id: org_user_id
:param payload: callback payload
:param callback_url: callback URL for the job
:param callback_method: HTTP method for callback
:param job_description: Job description
:param job_data_url: Job data url
:param object_id: object id
:param start_datetime: start datetime for job
:param hours: repeat in hours
:returns: response
"""
jobs_payload = {
'template': {
'name': 'http.main',
'data': {
'url': callback_url,
'method': callback_method,
'payload': payload
}
},
'job_data': {
'description': job_description,
'url': '' if not job_data_url else job_data_url
},
'job_meta_data': {
'object_id': object_id
},
'trigger': {
'type': 'interval',
'when': {
'hours': hours,
'start_date': start_datetime
}
},
'notification': {
'enabled': False
},
'org_user_id': org_user_id
}
return jobs_payload
def trigger_now(self, callback_url: str, callback_method: str, org_user_id: str,
job_description: str, object_id: str, payload: any = None,
job_data_url: str = None, start_datetime=None, hours=None, ) -> Dict:
"""
Trigger callback immediately
:param org_user_id: org_user_id
:param payload: callback payload
:param callback_url: callback URL for the job
:param callback_method: HTTP method for callback
:param job_description: Job description
:param job_data_url: Job data url
:param object_id: object id
:param start_datetime: start datetime for job
:param hours: repeat in hours
:returns: response
"""
        body = self.body_template(callback_url, callback_method, job_description, job_data_url, object_id, hours,
start_datetime, org_user_id, payload)
response = self._post_request(body, Jobs.JOBS_URL)
return response
def trigger_interval(self, callback_url: str, callback_method: str,
job_description: str, object_id: str, hours: int,
start_datetime: str, org_user_id: str, job_data_url: str = None, payload: str = None) -> Dict:
"""
Trigger callback on Interval
:param org_user_id: org_user_id
:param payload: payload
:param start_datetime: start datetime for job
:param hours: repeat in hours
:param callback_url: callback URL for the job
:param callback_method: HTTP method for callback
:param job_description: Job description
:param job_data_url: Job data url
:param object_id: object id
:returns: response
"""
        body = self.body_template(callback_url, callback_method, job_description, job_data_url, object_id, hours,
start_datetime, org_user_id, payload)
response = self._post_request(body, Jobs.JOBS_URL)
return response
def delete(self, job_id):
"""
Delete job
:param job_id: id of the job to delete
:return:
"""
response = self.delete_job_request(job_id)
return response
``` |
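A heavily hedged sketch of invoking `trigger_now`. Only the method signature comes from the class above; how the `Jobs` API object is obtained from the SDK client (shown here as `connection.Jobs`) is an assumption about the surrounding fylesdk package, and all identifiers and URLs are placeholders.
```python
# Hypothetical wiring: `connection` is assumed to be an authenticated fylesdk
# client exposing this API as `connection.Jobs`; ids and URLs are placeholders.
response = connection.Jobs.trigger_now(
    callback_url='https://example.com/fyle/callback',
    callback_method='POST',
    org_user_id='ouXXXXXXXX',
    job_description='One-off expense sync',
    object_id='obj_0001',
    payload={'reason': 'manual trigger'},
)
print(response)
```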
{
"source": "jinchuuriki91/python-zomato",
"score": 3
} |
#### File: python-zomato/zomato_wrapper/api.py
```python
from bs4 import BeautifulSoup
from zomato_wrapper.base import get_page
from zomato_wrapper.constants import BASE_URL
def get_countries(tag=None):
soup = BeautifulSoup(get_page(BASE_URL), features="html.parser")
countries_resultset = soup.find("footer", {"id": "footer"}).findChildren("a", {"class": "pl5"})
resp = [{
"name": x.get_text(strip=True),
"url": x.get("href", None),
"tag": x.get("href", "").split('/')[-1]
} for x in countries_resultset]
if tag:
fil = list(filter(lambda x: x["tag"] == tag, resp))
return fil[0] if fil else None
return resp
def get_cities(country_tag):
country = get_countries(country_tag)
if not country:
return None
soup = BeautifulSoup(get_page(country["url"]), features="html.parser")
city_resultset = soup.findChildren("a", {"style": "flex-grow: 1;"})
resp = [{
"name": x.get_text(strip=True),
"url": x.get("href", None)
} for x in city_resultset]
return resp
``` |
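A small sketch of chaining the two scraper helpers above; it performs live HTTP requests against zomato.com, and the 'india' tag is illustrative — it has to match one of the footer country links returned by `get_countries`.
```python
# Illustrative chaining of the scraper helpers; makes live requests to zomato.com.
from zomato_wrapper.api import get_countries, get_cities

countries = get_countries()
print(countries[:3])        # [{'name': ..., 'url': ..., 'tag': ...}, ...]
print(get_cities('india'))  # [{'name': ..., 'url': ...}, ...] or None for an unknown tag
```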
{
"source": "JincorTech/backend-synapse",
"score": 2
} |
#### File: synapse/handlers/identity.py
```python
from twisted.internet import defer
from synapse.api.errors import (
CodeMessageException
)
from ._base import BaseHandler
from synapse.util.async import run_on_reactor
from synapse.api.errors import SynapseError, Codes
import json
import logging
logger = logging.getLogger(__name__)
class IdentityHandler(BaseHandler):
def __init__(self, hs):
super(IdentityHandler, self).__init__(hs)
self.http_client = hs.get_simple_http_client()
self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers)
self.trust_any_id_server_just_for_testing_do_not_use = (
hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
)
def _should_trust_id_server(self, id_server):
if id_server not in self.trusted_id_servers:
if self.trust_any_id_server_just_for_testing_do_not_use:
logger.warn(
"Trusting untrustworthy ID server %r even though it isn't"
" in the trusted id list for testing because"
" 'use_insecure_ssl_client_just_for_testing_do_not_use'"
" is set in the config",
id_server,
)
else:
return False
return True
@defer.inlineCallbacks
def threepid_from_creds(self, creds):
yield run_on_reactor()
if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
id_server = creds['idServer']
else:
raise SynapseError(400, "No id_server in creds")
if 'client_secret' in creds:
client_secret = creds['client_secret']
elif 'clientSecret' in creds:
client_secret = creds['clientSecret']
else:
raise SynapseError(400, "No client_secret in creds")
if not self._should_trust_id_server(id_server):
logger.warn(
'%s is not a trusted ID server: rejecting 3pid ' +
'credentials', id_server
)
defer.returnValue(None)
data = {}
try:
data = yield self.http_client.get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/3pid/getValidated3pid"
),
{'sid': creds['sid'], 'client_secret': client_secret}
)
except CodeMessageException as e:
data = json.loads(e.msg)
if 'medium' in data:
defer.returnValue(data)
defer.returnValue(None)
@defer.inlineCallbacks
def bind_threepid(self, creds, mxid):
yield run_on_reactor()
logger.debug("binding threepid %r to %s", creds, mxid)
data = None
if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
id_server = creds['idServer']
else:
raise SynapseError(400, "No id_server in creds")
if 'client_secret' in creds:
client_secret = creds['client_secret']
elif 'clientSecret' in creds:
client_secret = creds['clientSecret']
else:
raise SynapseError(400, "No client_secret in creds")
try:
data = yield self.http_client.post_urlencoded_get_json(
"https://%s%s" % (
id_server, "/_matrix/identity/api/v1/3pid/bind"
),
{
'sid': creds['sid'],
'client_secret': client_secret,
'mxid': mxid,
}
)
logger.debug("bound threepid %r to %s", creds, mxid)
except CodeMessageException as e:
data = json.loads(e.msg)
defer.returnValue(data)
@defer.inlineCallbacks
def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
yield run_on_reactor()
if not self._should_trust_id_server(id_server):
raise SynapseError(
400, "Untrusted ID server '%s'" % id_server,
Codes.SERVER_NOT_TRUSTED
)
params = {
'email': email,
'client_secret': client_secret,
'send_attempt': send_attempt,
}
params.update(kwargs)
try:
data = yield self.http_client.post_json_get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/validate/email/requestToken"
),
params
)
defer.returnValue(data)
except CodeMessageException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e
@defer.inlineCallbacks
def requestMsisdnToken(
self, id_server, country, phone_number,
client_secret, send_attempt, **kwargs
):
yield run_on_reactor()
if not self._should_trust_id_server(id_server):
raise SynapseError(
400, "Untrusted ID server '%s'" % id_server,
Codes.SERVER_NOT_TRUSTED
)
params = {
'country': country,
'phone_number': phone_number,
'client_secret': client_secret,
'send_attempt': send_attempt,
}
params.update(kwargs)
try:
data = yield self.http_client.post_json_get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/validate/msisdn/requestToken"
),
params
)
defer.returnValue(data)
except CodeMessageException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e
```
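A minimal hedged sketch of how the handler above might be driven from other Twisted code; the `hs` homeserver object and the shape of `creds` are assumptions taken from the method bodies, not from a documented call site.

```python
# Hypothetical call site for IdentityHandler.threepid_from_creds; assumes `hs`
# is a configured Synapse homeserver and `creds` carries id_server/client_secret/sid.
from twisted.internet import defer

@defer.inlineCallbacks
def validate_threepid(hs, creds):
    handler = IdentityHandler(hs)
    threepid = yield handler.threepid_from_creds(creds)
    # threepid is None when the id server is untrusted or validation failed
    defer.returnValue(threepid is not None)
```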
#### File: backend-synapse/synapse/visibility.py
```python
from twisted.internet import defer
from synapse.api.constants import Membership, EventTypes
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
import logging
logger = logging.getLogger(__name__)
VISIBILITY_PRIORITY = (
"world_readable",
"shared",
"invited",
"joined",
)
MEMBERSHIP_PRIORITY = (
Membership.JOIN,
Membership.INVITE,
Membership.KNOCK,
Membership.LEAVE,
Membership.BAN,
)
@defer.inlineCallbacks
def filter_events_for_clients(store, user_tuples, events, event_id_to_state):
""" Returns dict of user_id -> list of events that user is allowed to
see.
Args:
user_tuples (str, bool): (user id, is_peeking) for each user to be
checked. is_peeking should be true if:
* the user is not currently a member of the room, and:
* the user has not been a member of the room since the
given events
events ([synapse.events.EventBase]): list of events to filter
"""
forgotten = yield preserve_context_over_deferred(defer.gatherResults([
preserve_fn(store.who_forgot_in_room)(
room_id,
)
for room_id in frozenset(e.room_id for e in events)
], consumeErrors=True))
# Set of membership event_ids that have been forgotten
event_id_forgotten = frozenset(
row["event_id"] for rows in forgotten for row in rows
)
ignore_dict_content = yield store.get_global_account_data_by_type_for_users(
"m.ignored_user_list", user_ids=[user_id for user_id, _ in user_tuples]
)
# FIXME: This will explode if people upload something incorrect.
ignore_dict = {
user_id: frozenset(
content.get("ignored_users", {}).keys() if content else []
)
for user_id, content in ignore_dict_content.items()
}
def allowed(event, user_id, is_peeking, ignore_list):
"""
Args:
event (synapse.events.EventBase): event to check
user_id (str)
is_peeking (bool)
ignore_list (list): list of users to ignore
"""
if not event.is_state() and event.sender in ignore_list:
return False
state = event_id_to_state[event.event_id]
# get the room_visibility at the time of the event.
visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
if visibility_event:
visibility = visibility_event.content.get("history_visibility", "shared")
else:
visibility = "shared"
if visibility not in VISIBILITY_PRIORITY:
visibility = "shared"
# if it was world_readable, it's easy: everyone can read it
if visibility == "world_readable":
return True
# Always allow history visibility events on boundaries. This is done
# by setting the effective visibility to the least restrictive
# of the old vs new.
if event.type == EventTypes.RoomHistoryVisibility:
prev_content = event.unsigned.get("prev_content", {})
prev_visibility = prev_content.get("history_visibility", None)
if prev_visibility not in VISIBILITY_PRIORITY:
prev_visibility = "shared"
new_priority = VISIBILITY_PRIORITY.index(visibility)
old_priority = VISIBILITY_PRIORITY.index(prev_visibility)
if old_priority < new_priority:
visibility = prev_visibility
# likewise, if the event is the user's own membership event, use
# the 'most joined' membership
membership = None
if event.type == EventTypes.Member and event.state_key == user_id:
membership = event.content.get("membership", None)
if membership not in MEMBERSHIP_PRIORITY:
membership = "leave"
prev_content = event.unsigned.get("prev_content", {})
prev_membership = prev_content.get("membership", None)
if prev_membership not in MEMBERSHIP_PRIORITY:
prev_membership = "leave"
# Always allow the user to see their own leave events, otherwise
# they won't see the room disappear if they reject the invite
if membership == "leave" and (
prev_membership == "join" or prev_membership == "invite"
):
return True
new_priority = MEMBERSHIP_PRIORITY.index(membership)
old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
if old_priority < new_priority:
membership = prev_membership
# otherwise, get the user's membership at the time of the event.
if membership is None:
membership_event = state.get((EventTypes.Member, user_id), None)
if membership_event:
if membership_event.event_id not in event_id_forgotten:
membership = membership_event.membership
# if the user was a member of the room at the time of the event,
# they can see it.
if membership == Membership.JOIN:
return True
if visibility == "joined":
# we weren't a member at the time of the event, so we can't
# see this event.
return False
elif visibility == "invited":
# user can also see the event if they were *invited* at the time
# of the event.
return membership == Membership.INVITE
else:
# visibility is shared: user can also see the event if they have
# become a member since the event
#
# XXX: if the user has subsequently joined and then left again,
# ideally we would share history up to the point they left. But
# we don't know when they left.
return not is_peeking
defer.returnValue({
user_id: [
event
for event in events
if allowed(event, user_id, is_peeking, ignore_dict.get(user_id, []))
]
for user_id, is_peeking in user_tuples
})
@defer.inlineCallbacks
def filter_events_for_clients_context(store, user_tuples, events, event_id_to_context):
user_ids = set(u[0] for u in user_tuples)
event_id_to_state = {}
for event_id, context in event_id_to_context.items():
state = yield store.get_events([
e_id
for key, e_id in context.current_state_ids.iteritems()
if key == (EventTypes.RoomHistoryVisibility, "")
or (key[0] == EventTypes.Member and key[1] in user_ids)
])
event_id_to_state[event_id] = state
res = yield filter_events_for_clients(
store, user_tuples, events, event_id_to_state
)
defer.returnValue(res)
@defer.inlineCallbacks
def filter_events_for_client(store, user_id, events, is_peeking=False):
"""
Check which events a user is allowed to see
Args:
user_id(str): user id to be checked
events([synapse.events.EventBase]): list of events to be checked
is_peeking(bool): should be True if:
* the user is not currently a member of the room, and:
* the user has not been a member of the room since the given
events
Returns:
[synapse.events.EventBase]
"""
types = (
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.Member, user_id),
)
event_id_to_state = yield store.get_state_for_events(
frozenset(e.event_id for e in events),
types=types
)
res = yield filter_events_for_clients(
store, [(user_id, is_peeking)], events, event_id_to_state
)
defer.returnValue(res.get(user_id, []))
``` |
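For context, a small hedged sketch of calling the single-user helper above from other deferred-returning code; the `store` and `events` objects are assumed to come from the surrounding Synapse server and are not defined here.

```python
# Hypothetical usage of filter_events_for_client; `store` is assumed to be the
# Synapse datastore and `events` a list of EventBase objects.
from twisted.internet import defer

@defer.inlineCallbacks
def visible_event_ids(store, user_id, events):
    visible = yield filter_events_for_client(store, user_id, events, is_peeking=False)
    defer.returnValue([e.event_id for e in visible])
```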
{
"source": "jinczing/AudioCLIP",
"score": 2
} |
#### File: AudioCLIP/ignite_trainer/_visdom.py
```python
import os
import sys
import json
import time
import tqdm
import socket
import subprocess
import numpy as np
import visdom
from typing import Tuple
from typing import Optional
def calc_ytick_range(vis: visdom.Visdom, window_name: str, env: Optional[str] = None) -> Tuple[float, float]:
lower_bound, upper_bound = -1.0, 1.0
stats = vis.get_window_data(win=window_name, env=env)
if stats:
stats = json.loads(stats)
stats = [np.array(item['y']) for item in stats['content']['data']]
stats = [item[item != np.array([None])].astype(np.float16) for item in stats]
if stats:
q25s = np.array([np.quantile(item, 0.25) for item in stats if len(item) > 0])
q75s = np.array([np.quantile(item, 0.75) for item in stats if len(item) > 0])
if q25s.shape == q75s.shape and len(q25s) > 0:
iqrs = q75s - q25s
lower_bounds = q25s - 1.5 * iqrs
upper_bounds = q75s + 1.5 * iqrs
stats_sanitized = list()
idx = 0
for item in stats:
if len(item) > 0:
item_sanitized = item[(item >= lower_bounds[idx]) & (item <= upper_bounds[idx])]
stats_sanitized.append(item_sanitized)
idx += 1
stats_sanitized = np.array(stats_sanitized)
q25_sanitized = np.array([np.quantile(item, 0.25) for item in stats_sanitized])
q75_sanitized = np.array([np.quantile(item, 0.75) for item in stats_sanitized])
iqr_sanitized = np.sum(q75_sanitized - q25_sanitized)
lower_bound = np.min(q25_sanitized) - 1.5 * iqr_sanitized
upper_bound = np.max(q75_sanitized) + 1.5 * iqr_sanitized
return lower_bound, upper_bound
def plot_line(vis: visdom.Visdom,
window_name: str,
env: Optional[str] = None,
line_label: Optional[str] = None,
x: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
x_label: Optional[str] = None,
y_label: Optional[str] = None,
width: int = 576,
height: int = 416,
draw_marker: bool = False) -> str:
empty_call = not vis.win_exists(window_name)
if empty_call and (x is not None or y is not None):
return window_name
if x is None:
x = np.ones(1)
empty_call = empty_call & True
if y is None:
y = np.full(1, np.nan)
empty_call = empty_call & True
if x.shape != y.shape:
x = np.ones_like(y)
opts = {
'showlegend': True,
'markers': draw_marker,
'markersize': 5,
}
if empty_call:
opts['title'] = window_name
opts['width'] = width
opts['height'] = height
window_name = vis.line(
X=x,
Y=y,
win=window_name,
env=env,
update='append',
name=line_label,
opts=opts
)
xtickmin, xtickmax = 0.0, np.max(x) * 1.05
ytickmin, ytickmax = calc_ytick_range(vis, window_name, env)
opts = {
'showlegend': True,
'xtickmin': xtickmin,
'xtickmax': xtickmax,
'ytickmin': ytickmin,
'ytickmax': ytickmax,
'xlabel': x_label,
'ylabel': y_label
}
window_name = vis.update_window_opts(win=window_name, opts=opts, env=env)
return window_name
def create_summary_window(vis: visdom.Visdom,
visdom_env_name: str,
experiment_name: str,
summary: str) -> str:
return vis.text(
text=summary,
win=experiment_name,
env=visdom_env_name,
opts={'title': 'Summary', 'width': 576, 'height': 416},
append=vis.win_exists(experiment_name, visdom_env_name)
)
def connection_is_alive(host: str, port: int) -> bool:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
sock.connect((host, port))
sock.shutdown(socket.SHUT_RDWR)
return True
except socket.error:
return False
def get_visdom_instance(host: str = 'localhost',
port: int = 8097,
env_name: str = 'main',
env_path: str = 'visdom_env') -> Tuple[visdom.Visdom, Optional[int]]:
vis_pid = None
if not connection_is_alive(host, port):
if any(host.strip('/').endswith(lh) for lh in ['127.0.0.1', 'localhost']):
os.makedirs(env_path, exist_ok=True)
tqdm.tqdm.write('Starting visdom on port {}'.format(port), end='')
vis_args = [
sys.executable,
'-m', 'visdom.server',
'-port', str(port),
'-env_path', os.path.join(os.getcwd(), env_path)
]
vis_proc = subprocess.Popen(vis_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(2.0)
vis_pid = vis_proc.pid
tqdm.tqdm.write('PID -> {}'.format(vis_pid))
trials_left = 5
while not connection_is_alive(host, port):
time.sleep(1.0)
tqdm.tqdm.write('Trying to connect ({} left)...'.format(trials_left))
trials_left -= 1
if trials_left < 1:
raise RuntimeError('Visdom server is not running. Please run "python -m visdom.server".')
vis = visdom.Visdom(
server='http://{}'.format(host),
port=port,
env=env_name
)
return vis, vis_pid
``` |
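A hedged usage sketch for the helpers above: the first `plot_line` call without data creates the window, and later calls append points. The host, port, environment name, and loss values are illustrative assumptions.

```python
# Hypothetical usage: start (or attach to) a Visdom server, create a plot
# window, then append one point per epoch to a named line.
import numpy as np

vis, vis_pid = get_visdom_instance(host='localhost', port=8097, env_name='demo')
plot_line(vis, 'training-loss', env='demo', x_label='epoch', y_label='loss')
for epoch, loss in enumerate([0.9, 0.7, 0.55], start=1):
    plot_line(vis, 'training-loss', env='demo', line_label='loss',
              x=np.array([float(epoch)]), y=np.array([loss]), draw_marker=True)
```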
{
"source": "jind11/SememePSO-Attack",
"score": 3
} |
#### File: SememePSO-Attack/IMDB/AD_dpso_sem.py
```python
from __future__ import division
import numpy as np
import tensorflow as tf
import pickle
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding, LSTM, Bidirectional
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import *
np.random.seed(3333)
tf.set_random_seed(3333)
VOCAB_SIZE = 50000
with open('aux_files/dataset_%d.pkl' %VOCAB_SIZE, 'rb') as f:
dataset = pickle.load(f)
with open('word_candidates_sense.pkl','rb') as fp:
word_candidate=pickle.load(fp)
with open('pos_tags_test.pkl','rb') as fp:
test_pos_tags=pickle.load(fp)
# Prevent returning 0 as most similar word because it is not part of the dictionary
max_len = 250
train_x = pad_sequences(dataset.train_seqs2, maxlen=max_len, padding='post')
train_y = np.array(dataset.train_y)
test_x = pad_sequences(dataset.test_seqs2, maxlen=max_len, padding='post')
test_y = np.array(dataset.test_y)
batch_size = 1
lstm_size = 128
#max_len = 100
from attack_dpso_sem import PSOAttack
pop_size = 60
def bd_lstm(embedding_matrix):
max_len = 250
num_classes = 2
loss = 'binary_crossentropy'
activation = 'sigmoid'
embedding_dims = 300
num_words = VOCAB_SIZE
print('Build word_bdlstm model...')
model = Sequential()
model.add(Embedding( # Layer 0, Start
input_dim=num_words + 1, # Size to dictionary, has to be input + 1
output_dim=embedding_dims, # Dimensions to generate
weights=[embedding_matrix], # Initialize word weights
input_length=max_len,
name="embedding_layer",
trainable=False))
OPTIMIZER = 'adam'
model.add(Bidirectional(CuDNNLSTM(128, return_sequences=True)))
model.add(Bidirectional(CuDNNLSTM(64, return_sequences=True)))
model.add(Dropout(0.5))
model.add(GlobalMaxPooling1D())
model.add(Dense(2, activation=activation))
model.summary()
# try using different optimizers and different optimizer configs
model.compile(OPTIMIZER, loss, metrics=['accuracy'])
return model
embedding_matrix = np.load(('aux_files/embeddings_glove_%d.npy' % (VOCAB_SIZE)))
embedding_matrix=embedding_matrix.T
model = bd_lstm(embedding_matrix)
model_path = 'bdlstm_models'
model.load_weights(model_path)
test_y2 = np.array([[0, 1] if t == 1 else [1, 0] for t in test_y])
all_scores_origin = model.evaluate(test_x, test_y2)
print('all origin test_loss: %f, accuracy: %f' % (all_scores_origin[0], all_scores_origin[1]))
ga_attack = PSOAttack(model, word_candidate, dataset,
max_iters=20,
pop_size=pop_size)
SAMPLE_SIZE = len(dataset.test_y)
TEST_SIZE = 1000
test_idx = np.random.choice(len(dataset.test_y), SAMPLE_SIZE, replace=False)
test_len = []
for i in range(SAMPLE_SIZE):
test_len.append(np.sum(np.sign(test_x[test_idx[i]])))
print('Shortest sentence in our test set is %d words' %np.min(test_len))
test_list = []
orig_list = []
orig_label_list = []
adv_list = []
dist_list = []
adv_orig=[]
adv_orig_label=[]
fail_list=[]
adv_training_examples=[]
SUCCESS_THRESHOLD = 0.25
for i in range(SAMPLE_SIZE):
pos_tags=test_pos_tags[test_idx[i]]
x_orig = test_x[test_idx[i]]
orig_label = test_y[test_idx[i]]
orig_preds= model.predict(x_orig[np.newaxis, :])[0]
if np.argmax(orig_preds) != orig_label:
        print('skipping wrongly classified ..')
print('--------------------------')
continue
x_len = np.sum(np.sign(x_orig))
if x_len >= 100:
print('skipping too long input..')
print('--------------------------')
continue
if x_len<10:
print('skipping too short input..')
print('--------------------------')
continue
print('****** ', len(test_list) + 1, ' ********')
test_list.append(test_idx[i])
orig_list.append(x_orig)
target_label = 1 if orig_label == 0 else 0
orig_label_list.append(orig_label)
    x_adv = ga_attack.attack(x_orig, target_label, pos_tags)
if x_adv is None:
print('%d failed' %(i+1))
fail_list.append(test_idx[i])
else:
num_changes = np.sum(x_orig != x_adv)
print('%d - %d changed.' %(i+1, int(num_changes)))
modify_ratio=num_changes/x_len
if modify_ratio>0.25:
print('too long:',modify_ratio)
else:
print('success!')
adv_list.append(x_adv)
adv_orig.append(test_idx[i])
adv_orig_label.append(orig_label)
adv_training_examples.append(test_idx[i])
dist_list.append(modify_ratio)
# display_utils.visualize_attack(sess, model, dataset, x_orig, x_adv)
print('--------------------------')
if (len(test_list)>= TEST_SIZE):
break
print('Attack success rate : {:.2f}%'.format(len(adv_list)/len(test_list)*100))
print('Median percentage of modifications: {:.02f}% '.format(
    np.median(dist_list)*100))
print('Mean percentage of modifications: {:.02f}% '.format(
    np.mean(dist_list)*100))
with open('AD_dpso_sem.pkl', 'wb') as f:
pickle.dump((fail_list,adv_orig_label,adv_orig,adv_list,dist_list), f)
```
#### File: SememePSO-Attack/SNLI/encap_snli_bert.py
```python
from SNLI_BERT import ModelTrainer
from SNLI_BERT import adjustBatchInputLen
from pytorch_transformers import BertTokenizer, BertModel, AdamW, WarmupLinearSchedule
from torch import nn
import torch
import config
class Model(nn.Module):
def __init__(self, inv_dict):
super(Model, self).__init__()
self.config = config.SNLIConfig()
model = BertModel.from_pretrained(self.config.BERT_MODEL)
self.model = ModelTrainer(model, 3)
self.model.load_state_dict(torch.load(self.config.model_name))
self.model = self.model.eval().cuda()
self.inv_dict = inv_dict
self.tokenizer = BertTokenizer.from_pretrained(self.config.BERT_MODEL)
self.m = nn.Softmax(1)
def forward(self,input_x):
        assert len(input_x[0]) == len(input_x[1]), "premise and hypothesis should share the same batch length!"
num_instance = len(input_x[0])
batch = dict()
batch["inputs"] = []
batch["labels"] = torch.zeros((num_instance,)).long()
for i in range(len(input_x[0])):
tokens = list()
tokens.append(self.tokenizer.cls_token)
for k in [0, 1]:
add_sep = False
if k == 0:
add_sep = True
for j in range(len(input_x[k][i])):
#print(input_x[i], tokens)
#print(type(input_x[i][j]))
#print(self.dataset.inv_dict[0])
# inv_dict has no padding, maybe because of keras setting
if input_x[k][i][j] != 0:
tokens.append(self.inv_dict[int(input_x[k][i][j])])
if add_sep:
tokens.append("[SEP]")
tokens = self.tokenizer.convert_tokens_to_ids(tokens)
batch["inputs"].append(tokens)
adjustBatchInputLen(batch)
end_id = self.tokenizer.convert_tokens_to_ids("[SEP]")
for i in range(len(input_x[0])):
tokens = batch["inputs"][i]
tokens.append(end_id)
batch["inputs"] = torch.stack([torch.LongTensor(x) for x in batch['inputs']])
with torch.no_grad():
loss, logits = self.model(batch)
logits = self.m(logits[:,[1,0,2]])
return logits.cpu().numpy()
def predict(self, input_x):
# sess is of no use, just to tailor the ugly interface
return self(input_x)
def pred(self, x, y):
return self([x, y])
def adjustBatchInputLen(self, batch):
inputs = batch["inputs"]
length = 0
for item in inputs:
length = max(length, len(item))
length = min(length, self.config.max_sent_lens)
num = len(inputs)
for i in range(num):
if length > len(inputs[i]):
for j in range(length - len(inputs[i])):
inputs[i].append(self.tokenizer.pad_token_id)
else:
inputs[i] = inputs[i][:length]
```
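A hedged sketch of driving the wrapper above. The toy `inv_dict`, the id arrays, and the presence of a trained `BERTModel.pt` checkpoint plus a CUDA device are all assumptions.

```python
# Hypothetical usage (requires the BERTModel.pt checkpoint and a GPU, since the
# wrapper calls .cuda()); 0-valued ids are treated as padding and skipped.
import numpy as np

inv_dict = {1: 'a', 2: 'man', 3: 'sleeps', 4: 'outside'}  # toy id -> token map
model = Model(inv_dict)
premise = np.array([[1, 2, 3, 0]])
hypothesis = np.array([[1, 2, 4, 0]])
probs = model.pred(premise, hypothesis)   # (batch, 3) softmax over SNLI labels
```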
#### File: SememePSO-Attack/SST/config.py
```python
class DianpingConfig:
def __init__(self):
self.instance_name = "BERTModel.pt"
self.model_name = self.instance_name
self.BERT_MODEL = "bert-base-chinese"
self.max_sent_lens = 64
class SSTConfig:
def __init__(self):
self.instance_name = "BERTModel.pt"
self.model_name = self.instance_name
self.BERT_MODEL = "bert-base-uncased"
self.max_sent_lens = 32
class SNLIConfig:
def __init__(self):
self.instance_name = "BERTModel.pt"
self.model_name = self.instance_name
self.BERT_MODEL = "bert-base-uncased"
self.max_sent_lens = 64
class IMDBConfig:
def __init__(self):
self.instance_name = "BERTModel.pt"
self.model_name = self.instance_name
self.BERT_MODEL = "bert-base-uncased"
self.max_sent_lens = 254
class LCQMCConfig:
def __init__(self):
self.instance_name = "BERTModel.pt"
self.model_name = self.instance_name
self.BERT_MODEL = "bert-base-chinese"
self.max_sent_lens = 64
``` |
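A tiny hedged example of selecting one of the config classes above; the lookup-by-name pattern is an illustration, not taken from the repo.

```python
# Hypothetical dataset-to-config lookup.
configs = {'SST': SSTConfig, 'SNLI': SNLIConfig, 'IMDB': IMDBConfig, 'LCQMC': LCQMCConfig}
cfg = configs['SST']()
print(cfg.BERT_MODEL, cfg.max_sent_lens)   # bert-base-uncased 32
```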
{
"source": "jind11/sentence-classification",
"score": 3
} |
#### File: sentence-classification/code/util.py
```python
import json
import os
import time
import cPickle
import numpy as np
import sys
import re
import operator
import fnmatch
from gensim.models.keyedvectors import KeyedVectors
from tensorflow.contrib import learn
from keras.preprocessing import sequence
# construc embedding vectors based on the google word2vec and vocabulary
def process_word2vec(word2vec_dir, vocab, save_path, random_init=True):
# read pre-trained word embedddings from the binary file
print('Loading google word2vec...')
word2vec_path = word2vec_dir + '/GoogleNews-vectors-negative300.bin.gz'
word_vectors = KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
print('Word2vec loaded!')
if random_init:
word2vec = np.random.uniform(-0.25, 0.25, (len(vocab), 300))
else:
word2vec = np.zeros((len(vocab), 300))
found = 0
for idx, token in enumerate(vocab):
try:
vec = word_vectors[token]
except:
pass
else:
word2vec[idx, :] = vec
found += 1
del word_vectors
print("{}/{} of word vocab have corresponding vectors in {}".format(found, len(vocab), word2vec_path))
np.savez_compressed(save_path, word2vec=word2vec)
print("saved trimmed word2vec matrix at: {}".format(save_path))
# construct embedding vectors according to the GloVe word vectors and vocabulary
def process_glove(glove_dir, glove_dim, vocab_dir, save_path, random_init=True):
"""
:param vocab_list: [vocab]
:return:
"""
save_path = save_path + '.{}'.format(glove_dim)
if not os.path.isfile(save_path + ".npz"):
# read vocabulary
with open(vocab_dir + '/vocabulary.pickle', 'rb') as f:
vocab_map = cPickle.load(f)
f.close()
vocab_list = list(zip(*vocab_map)[0])
glove_path = os.path.join(glove_dir, "glove.6B.{}d.txt".format(glove_dim))
if random_init:
glove = np.random.uniform(-0.25, 0.25, (len(vocab_list), glove_dim))
else:
glove = np.zeros((len(vocab_list), glove_dim))
found = 0
with open(glove_path, 'r') as fh:
for line in fh.readlines():
array = line.lstrip().rstrip().split(" ")
word = array[0]
vector = list(map(float, array[1:]))
if word in vocab_list:
idx = vocab_list.index(word)
glove[idx, :] = vector
found += 1
if word.capitalize() in vocab_list:
idx = vocab_list.index(word.capitalize())
glove[idx, :] = vector
found += 1
if word.upper() in vocab_list:
idx = vocab_list.index(word.upper())
glove[idx, :] = vector
found += 1
print("{}/{} of word vocab have corresponding vectors in {}".format(found, len(vocab_list), glove_path))
np.savez_compressed(save_path, glove=glove)
print("saved trimmed glove matrix at: {}".format(save_path))
def load_embeddings(dir, embedding_type):
return np.load(dir)[embedding_type]
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
# preprocess the MR datasets
def preprocess_data_and_labels_MR(positive_data_file_path, negative_data_file_path, save_path, pad_width=0):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open(positive_data_file_path, "r").readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(negative_data_file_path, "r").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
# Generate labels
positive_labels = [[1] for _ in positive_examples]
negative_labels = [[0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
# pad the left and right with zeros
    if pad_width > 0:
        x_padded = np.lib.pad(x, ((0, 0), (pad_width, pad_width)), 'constant', constant_values=(0, 0))
    else:
        x_padded = x
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(x.shape[0]))
x_shuffled = x_padded[shuffle_indices]
y_shuffled = y[shuffle_indices]
# merge data and labels
data_and_labels = zip(x_shuffled, y_shuffled)
# save train data and labels
with open(save_path + '/data_and_labels.pickle', 'w') as f:
cPickle.dump(data_and_labels, f)
f.close()
# get vocabulary and save it
# Extract word:id mapping from the object.
vocab_dict = vocab_processor.vocabulary_._mapping
# Sort the vocabulary dictionary on the basis of values(id)
sorted_vocab_dict = sorted(vocab_dict.items(), key=operator.itemgetter(1))
sorted_vocab = list(zip(*sorted_vocab_dict))[0]
with open(save_path + '/vocabulary.pickle', 'w') as f:
cPickle.dump(sorted_vocab, f)
f.close()
# Process word vector embeddings
process_word2vec('../data', sorted_vocab, '../data/word2vec.trimmed')
# Extract a set of n-grams from a list of integers.
def create_ngram_set(input_list, ngram_value=2):
return set(zip(*[input_list[i:] for i in range(ngram_value)]))
# Augment the input list of list (sequences) by appending n-grams values.
def add_ngram(sequences, token_indice, ngram_range=2):
new_sequences = []
for input_list in sequences:
new_list = input_list[:]
for ngram_value in range(2, ngram_range + 1):
for i in range(len(new_list) - ngram_value + 1):
ngram = tuple(new_list[i:i + ngram_value])
if ngram in token_indice:
new_list.append(token_indice[ngram])
new_sequences.append(new_list)
return new_sequences
# preprocess the MR datasets especially for fasttext model
def preprocess_data_and_labels_MR_fasttext(positive_data_file_path, negative_data_file_path, save_path, ngram_range=1, pad_width=0):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open(positive_data_file_path, "r").readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(negative_data_file_path, "r").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
# Generate labels
positive_labels = [[1] for _ in positive_examples]
negative_labels = [[0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = list(vocab_processor.fit_transform(x_text))
# Extract word:id mapping from the object.
vocab_dict = vocab_processor.vocabulary_._mapping
max_features = len(vocab_dict)
# remove filled <UNK>, i.e., 0 index
x = [filter(lambda a: a != 0, line) for line in x]
print('Average sequence length before adding n-grams: {}'.format(np.mean(list(map(len, x)), dtype=int)))
# Add n-grams...
if ngram_range > 1:
print('Adding {}-gram features'.format(ngram_range))
# Create set of unique n-gram from the training set.
ngram_set = set()
for input_list in x:
for i in range(2, ngram_range + 1):
set_of_ngram = create_ngram_set(input_list, ngram_value=i)
ngram_set.update(set_of_ngram)
# Dictionary mapping n-gram token to a unique integer.
# Integer values are greater than max_features in order
# to avoid collision with existing features.
start_index = max_features + 1
token_indice = {v: k + start_index for k, v in enumerate(ngram_set)}
indice_token = {token_indice[k]: k for k in token_indice}
# Augmenting with n-grams features
x = add_ngram(x, token_indice, ngram_range)
print('Average sequence length after adding n-grams: {}'.format(np.mean(list(map(len, x)), dtype=int)))
# pad sequence
x = np.array(sequence.pad_sequences(x, padding='post'))
print('x shape:', x.shape)
# pad the left and right with zeros
    if pad_width > 0:
        x_padded = np.lib.pad(x, ((0, 0), (pad_width, pad_width)), 'constant', constant_values=(0, 0))
    else:
        x_padded = x
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(x_padded.shape[0]))
x_shuffled = x_padded[shuffle_indices]
y_shuffled = y[shuffle_indices]
# merge data and labels
data_and_labels = zip(x_shuffled, y_shuffled)
# save train data and labels
with open(save_path, 'w') as f:
cPickle.dump(data_and_labels, f)
f.close()
def load_data_MR(file_dir, fold=1):
print ("Loading datasets...")
# read train data and labels
with open(file_dir + '/data_and_labels.pickle', 'r') as f:
data_and_labels = cPickle.load(f)
f.close()
# Split train/test set
test_sample_index_s = int((fold - 1) / 10.0 * float(len(data_and_labels)))
test_sample_index_e = int(fold / 10.0 * float(len(data_and_labels)))
train_data_and_labels = data_and_labels[:test_sample_index_s] + data_and_labels[test_sample_index_e:]
test_data_and_labels = data_and_labels[test_sample_index_s:test_sample_index_e]
# Split data and labels
train_data, train_labels = zip(*train_data_and_labels)
train_data, train_labels = np.array(train_data), np.array(train_labels)
test_data, test_labels = zip(*test_data_and_labels)
test_data, test_labels = np.array(test_data), np.array(test_labels)
# read vocabulary
with open(file_dir + '/vocabulary.pickle', 'r') as f:
vocab = cPickle.load(f)
f.close()
seq_len = train_data.shape[1]
vocab_size = len(vocab)
return (train_data, train_labels, test_data, test_labels, seq_len, vocab_size)
def load_data_MR_fasttext(file_path, fold=1):
print ("Loading datasets...")
# read train data and labels
with open(file_path, 'r') as f:
data_and_labels = cPickle.load(f)
f.close()
# Split train/test set
test_sample_index_s = int((fold - 1) / 10.0 * float(len(data_and_labels)))
test_sample_index_e = int(fold / 10.0 * float(len(data_and_labels)))
train_data_and_labels = data_and_labels[:test_sample_index_s] + data_and_labels[test_sample_index_e:]
test_data_and_labels = data_and_labels[test_sample_index_s:test_sample_index_e]
# Split data and labels
train_data, train_labels = zip(*train_data_and_labels)
train_data, train_labels = np.array(train_data), np.array(train_labels)
test_data, test_labels = zip(*test_data_and_labels)
test_data, test_labels = np.array(test_data), np.array(test_labels)
seq_len = train_data.shape[1]
vocab_size = max([np.amax(train_data), np.amax(test_data)]) + 1
return (train_data, train_labels, test_data, test_labels, seq_len, vocab_size)
# preprocess the AskaPatient dataset
def preprocess_data_and_labels_AAP(data_file_path, save_path):
def merge_folds(data_file_path, save_path):
# merge all the separated folds into one file
train = []
val = []
test = []
for file in os.listdir(data_file_path):
if fnmatch.fnmatch(file, '*train.txt'):
train += (open(data_file_path + '/' + file, 'r').readlines())
elif fnmatch.fnmatch(file, '*validation.txt'):
val += (open(data_file_path + '/' + file, 'r').readlines())
else:
test += (open(data_file_path + '/' + file, 'r').readlines())
open(save_path + '/train.txt', 'w').write(''.join(train))
open(save_path + '/val.txt', 'w').write(''.join(val))
open(save_path + '/test.txt', 'w').write(''.join(test))
print len(train+val+test)
merge_folds(data_file_path, save_path)
def create_batches(data, labels, batch_size, shuffle=True):
# Generates a batch iterator for a dataset.
data_and_labels = np.array(zip(data, labels))
data_size = len(data)
num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
# Shuffle the data
if shuffle:
np.random.seed(11)
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data_and_labels[shuffle_indices]
else:
shuffled_data = data_and_labels
# create batches
batches = []
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
batches.append(shuffled_data[start_index:end_index])
return batches
class Progbar(object):
"""
Progbar class copied from keras (https://github.com/fchollet/keras/)
Displays a progress bar.
# Arguments
target: Total number of steps expected.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1):
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=None, exact=None):
"""
Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
exact: List of tuples (name, value_for_last_step).
The progress bar will display these values directly.
"""
values = values or []
exact = exact or []
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
for k, v in exact:
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = [v, 1]
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current)/self.target
prog_width = int(self.width*prog)
if prog_width > 0:
bar += ('='*(prog_width-1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.'*(self.width-prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit*(self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
if isinstance(self.sum_values[k], list):
info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
else:
info += ' - %s: %s' % (k, self.sum_values[k])
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width-self.total_width) * " ")
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write("\n")
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
sys.stdout.write(info + "\n")
def add(self, n, values=None):
self.update(self.seen_so_far+n, values)
if __name__=="__main__":
preprocess_data_and_labels_MR('../data/rt-polarity.pos', '../data/rt-polarity.neg', '../data', pad_width=4)
# preprocess_data_and_labels_MR_fasttext('../data/rt-polarity.pos', '../data/rt-polarity.neg', '../data/fasttext_data_and_labels.pickle',
# ngram_range=3, pad_width=4)
``` |
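A hedged sketch of a training-loop scaffold built on the helpers above, assuming `preprocess_data_and_labels_MR` has already produced the pickles under `../data`; the batch size and placeholder loss are arbitrary.

```python
# Hypothetical scaffold around load_data_MR / create_batches / Progbar.
train_x, train_y, test_x, test_y, seq_len, vocab_size = load_data_MR('../data', fold=1)
batches = create_batches(train_x, train_y, batch_size=50)
bar = Progbar(target=len(batches))
for step, batch in enumerate(batches, 1):
    x_batch, y_batch = zip(*batch)
    loss = 0.0  # placeholder for the real training step
    bar.update(step, values=[('loss', loss)])
```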
{
"source": "jindada1/Relaxion",
"score": 4
} |
#### File: Relaxion/db/adapter.py
```python
import sqlite3
def connect(dbfile):
try:
conn = sqlite3.connect(dbfile)
# cursor = self.conn.cursor()
print("[ok] connect db file %s" % dbfile)
return conn
except sqlite3.Error as e:
print("[err] %s" % e)
return None
class dbAdapter(object):
def __init__(self, conn, table=None):
self.conn = conn
self.cursor = conn.cursor()
        self.table = table  # keep a reference for disp_table()
        if table:
self.cursor.execute('select * from {}'.format(table))
print("[ok] find table:%s" % table)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self.conn:
self.conn.commit()
self.cursor.close()
self.conn.close()
# execute sql commands
def sql_do(self, sql, params):
self.cursor.execute(sql, params)
self.conn.commit()
# will be 1 if the update was successful (affecting 1 row) or 0 if it failed
return self.cursor.rowcount
def retrive(self, sql, params):
self.cursor.execute(sql, params)
return self.cursor.fetchone()
def retrive_all(self, sql, params):
self.cursor.execute(sql, params)
return self.cursor.fetchall()
def disp_table(self):
self.cursor.execute(
'select * from {}'.format(self.table))
for row in self.cursor:
print(row)
#######################################################################
#
# Function to fetch/query data from a database.
#
# This is the main function used to query a database for data.
#
# @param table The name of the database's table to query from.
#
# @param columns The string of columns, comma-separated, to fetch.
#
# @param limit Optionally, a limit of items to fetch.
#
#######################################################################
def get(self, table, columns, limit=None):
query = "SELECT {0} from {1};".format(columns, table)
self.cursor.execute(query)
# fetch data
rows = self.cursor.fetchall()
return rows[len(rows)-limit if limit else 0:]
#######################################################################
#
# Utility function that summarizes a dataset.
#
# This function takes a dataset, retrieved via the get() function, and
# returns only the maximum, minimum and average for each column.
#
# @param rows The retrieved data.
#
#######################################################################
@staticmethod
def summary(rows):
# split the rows into columns
cols = [[r[c] for r in rows] for c in range(len(rows[0]))]
# the time in terms of fractions of hours of how long ago
# the sample was assumes the sampling period is 10 minutes
def t(col): return "{:.1f}".format((len(rows) - col) / 6.0)
# return a tuple, consisting of tuples of the maximum,
# the minimum and the average for each column and their
# respective time (how long ago, in fractions of hours)
# average has no time, of course
ret = []
for c in cols:
hi = max(c)
hi_t = t(c.index(hi))
lo = min(c)
lo_t = t(c.index(lo))
avg = sum(c)/len(rows)
ret.append(((hi, hi_t), (lo, lo_t), avg))
return ret
#######################################################################
#
# Utility function that converts a dataset into CSV format.
#
# @param data The data, retrieved from the get() function.
#
# @param fname The file name to store the data in.
#
# @see get()
#
#######################################################################
@staticmethod
def toCSV(data, fname="output.csv"):
with open(fname, 'a') as file:
file.write(",".join([str(j) for i in data for j in i]))
```
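A hedged usage sketch for the adapter above; the SQLite file name and the `songs` table schema are assumptions.

```python
# Hypothetical usage: the context manager commits and closes on exit.
conn = connect('relaxion.db')
with dbAdapter(conn, table='songs') as db:
    db.sql_do('INSERT INTO songs (name, artist) VALUES (?, ?)', ('Yesterday', 'The Beatles'))
    rows = db.retrive_all('SELECT * FROM songs WHERE artist = ?', ('The Beatles',))
```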
#### File: Relaxion/music/fetcher.py
```python
from aiohttp import ClientSession
from datetime import datetime
import time
import json
import base64
import urllib
class Fetcher(object):
def __init__(self, **kwargs):
self.cookies = {}
self.headers = {}
async def _asyncGetHeaders(self, url, params = None):
async with ClientSession() as session:
resp = await session.get(url, params=params)
await session.close()
return resp.headers
async def _asyncGetText(self, url, params = None):
async with ClientSession() as session:
resp = await session.get(url, params=params)
txt = await resp.text()
await session.close()
return txt
async def _asyncGetJson(self, url, params = None):
async with ClientSession() as session:
resp = await session.get(url, params=params)
js = json.loads(await resp.text())
await session.close()
return js
async def _asyncPostJson(self, url, params = None, cookies = None):
async with ClientSession(cookies = cookies) as session:
resp = await session.post(url, data=params, headers=self.headers)
js = json.loads(await resp.text())
await session.close()
return js
async def _asyncGetJsonHeaders(self, url, params = None):
async with ClientSession() as session:
async with session.get(url, params=params, headers=self.headers) as resp:
js = json.loads(await resp.text())
await session.close()
return js
async def _asyncGetJsonHeadersCookies(self, url, params = None):
async with ClientSession(cookies=self.cookies) as session:
async with session.get(url, params=params, headers=self.headers) as resp:
js = json.loads(await resp.text())
await session.close()
return js
async def _asyncGetTextHeadersCookies(self, url, params = None):
async with ClientSession(cookies=self.cookies) as session:
async with session.get(url, params=params, headers=self.headers) as resp:
txt = await resp.text()
await session.close()
return txt
def jsonify(self, _dict):
return json.dumps(_dict).replace(" ", '')
def base64decode(self, text):
return base64.b64decode(text).decode(encoding="utf-8-sig")
def base64encode(self, text):
return base64.b64encode(text)
def to_time(self, timestamp):
return datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d, %H:%M:%S")
@property
def now_str(self):
return str(int(time.time()))
def quote_cna(self, val):
if '%' in val:
return val
return urllib.parse.quote(val)
def split_url(self, url):
return urllib.parse.urlsplit(url)
def unsplit_url(self, url):
return urllib.parse.urlunsplit(url)
```
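The underscore-prefixed coroutines above are awaited by the platform adapters further down; a minimal hedged sketch of calling one directly from an asyncio entry point (the URL is just an example).

```python
# Hypothetical direct use of the async helpers.
import asyncio

async def demo():
    f = Fetcher()
    data = await f._asyncGetJson('https://httpbin.org/json')
    print(f.to_time(1700000000), type(data))

asyncio.run(demo())
```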
#### File: Relaxion/music/interface.py
```python
from .fetcher import Fetcher
class Music(Fetcher):
def __init__(self, **kwargs):
Fetcher.__init__(self, **kwargs)
print("[ok] construct %s" % kwargs["name"])
def _getname(self, singers):
artist = ""
for index, singer in enumerate(singers):
if index == 0:
artist += singer['name']
else:
artist += "," + singer['name']
return artist
def _song(self, p, res_id, com_id, mv_id, pic_url, alb_name, lrc_url, name, arts, time, playable=True):
return {
"platform": p,
"idforres": res_id,
"url":'/{}/song/{}'.format(self, p, res_id),
"idforcomments": com_id,
"mvid": mv_id,
"cover": pic_url,
"albumname": alb_name,
"lrc": lrc_url,
"name": name,
"artist": arts,
"interval": time,
"playable": playable
}
def _album(self, p, alb_id, pic_url, name, com_id, arts, pub_day):
return {
"platform": p,
"albumid": alb_id,
"pic_url": pic_url,
"name": name,
"idforcomments": com_id,
"artist": arts,
"publish_date": pub_day
}
def _mv(self, p, name, pic_url, mv_id, com_id, arts, time, pub_day):
return {
"platform": p,
"name": name,
"pic_url": pic_url,
"mvid": mv_id,
"idforcomments": com_id,
"artist": arts,
"duration": time,
"publish_date": pub_day
}
def _uri(self, uri = None):
if uri:
return {"uri": uri}
return {"uri": 'https://www.baidu.com', "error": 1}
def _comment(self, avatar, username, content, stars, time):
return {
"avatar": avatar,
"username": username,
"content": content,
"stars": stars,
"time": time
}
    def _songlist(self, p, _id, name, pic, songnum):
return {
"platform":p,
"dissid":_id,
"name": name,
"pic": pic,
"songnum": songnum
}
def mvpicCDN(self, path):
# redirect to mv cover url
return self.mv_pic_host + path
async def searchSong(self, k, p, n):
return "base search result"
async def searchAlbum(self, k, p, n):
return "base search result"
async def searchMV(self, k, p, n):
return "base search result"
async def mvuri(self, _id):
return "mvuri"
async def musicuri(self, _id):
return "musicuri"
async def lyric(self, _id):
return "lyric"
async def songsinList(self, dissid, p, n):
return "songsinList"
async def songsinAlbum(self, _id):
return "songsinAlbum"
```
#### File: music/platforms/wangyi.py
```python
from music import Music
from Crypto.Cipher import AES
import binascii
import os
class WangYi(Music):
def __init__(self):
Music.__init__(self, name = "WangYi")
self.headers = {
'Referer' : 'https://music.163.com',
'Content-Type' : 'application/x-www-form-urlencoded',
'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}
self.commentType = {
'music' : 'R_SO_4_',
'mv' : 'R_MV_5_',
'album' : 'R_AL_3_',
'songlist' : 'A_PL_0_',
'radio' : 'A_DJ_1_',
'video' : 'R_VI_62_',
'dynamic' : 'A_EV_2_'
}
self.mv_pic_host = 'http://p4.music.126.net/'
def hasCopyright(self, song):
privilege = song['privilege']
if privilege:
if (not privilege['st'] == None) and privilege['st'] < 0:
return False
if (privilege['fee'] > 0 and (not privilege['fee'] == 8) and privilege['payed'] == 0 and privilege['pl'] <= 0):
return True
if (privilege['fee'] == 16 or privilege['fee'] == 4 and privilege['flag'] & 2048):
return True
if ((privilege['fee'] == 0 or privilege['payed']) and privilege['pl'] > 0 and privilege['dl'] == 0):
return True
if (privilege['pl'] == 0 and privilege['dl'] == 0):
return False
return True
else:
if song['status'] >= 0 or song['fee'] > 0:
return True
return False
def playable(self, song):
free = not (song['fee'] == 4 or song['fee'] == 1)
cpright = self.hasCopyright(song)
return free and cpright
def encrypted_request(self, data) -> dict:
MODULUS = (
"00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7"
"b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280"
"<KEY>"
"575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b"
"3ece0462db0a22b8e7"
)
PUBKEY = "010001"
NONCE = b"0CoJUm6Qyw8W8jud"
data = self.jsonify(data).encode("utf-8")
secret = self.create_key(16)
params = self.aes(self.aes(data, NONCE), secret)
encseckey = self.rsa(secret, PUBKEY, MODULUS)
return {"params": params.decode(), "encSecKey": encseckey}
def aes(self, text, key):
pad = 16 - len(text) % 16
text = text + bytearray([pad] * pad)
        encryptor = AES.new(key, AES.MODE_CBC, b"0102030405060708")
ciphertext = encryptor.encrypt(text)
return self.base64encode(ciphertext)
def rsa(self, text, pubkey, modulus):
text = text[::-1]
rs = pow(int(binascii.hexlify(text), 16), int(pubkey, 16), int(modulus, 16))
return format(rs, "x").zfill(256)
def create_key(self, size):
return binascii.hexlify(os.urandom(size))[:16]
# override, return object
async def searchSong(self, k, p, n):
params = {
'type': 1,
'limit': n,
'offset': int(p) * int(n),
's': k
}
api = "https://music.163.com/weapi/cloudsearch/get/web?csrf_token="
jsonresp = await self._asyncPostJson(api, params=self.encrypted_request(params))
result = {'songs': []}
append = result['songs'].append
try:
for wangyisong in jsonresp['result']['songs']:
append(self._song(
'wangyi',
wangyisong["id"],
wangyisong["id"],
wangyisong["mv"],
wangyisong['al']['picUrl'],
wangyisong["al"]['name'],
"/wangyi/lyric/%s" % wangyisong['id'],
wangyisong['name'],
self._getname(wangyisong['ar']),
int(wangyisong['dt']/1000),
self.playable(wangyisong)
))
except:
result['error'] = 1
return result
# override
async def searchAlbum(self, k, p, n):
params = {
's': k,
            'type': 10, # 1: song, 10: album, 1004: MV, 100: singer, 1000: songlist, 1002: user, 1006: lyrics, 1009: radio, 1014: video
'limit': n,
'offset': int(p) * int(n),
}
api = "http://music.163.com/api/search/pc"
jsonresp = await self._asyncPostJson(api, params=params)
result = {'albums': []}
append = result['albums'].append
try:
for album in jsonresp['result']['albums']:
append(self._album(
'wangyi',
album['id'],
album['picUrl'],
album['name'],
album['id'],
self._getname(album['artists']),
album['publishTime'],
))
except:
result['error'] = 1
return result
# override
async def searchMV(self, k, p, n):
params = {
's': k,
            'type': 1004, # 1: song, 10: album, 1004: MV, 100: singer, 1000: songlist, 1002: user, 1006: lyrics, 1009: radio, 1014: video
'limit': n,
'offset': int(p) * int(n),
}
api = "http://music.163.com/api/search/pc"
jsonresp = await self._asyncPostJson(api, params=params)
result = {'videos': []}
append = result['videos'].append
try:
for mv in jsonresp['result']['mvs']:
append(self._mv(
'wangyi',
mv['name'],
mv['cover'],
mv['id'],
mv['id'],
self._getname(mv['artists']),
int(mv['duration']/1000),
'-',
))
except:
result['error'] = 1
return result
# override
async def mvuri(self, _id):
params = {
"id": _id,
"r": 1080
}
api = "https://music.163.com/weapi/song/enhance/play/mv/url"
jsonresp = await self._asyncPostJson(api, params = self.encrypted_request(params))
result = self._uri(jsonresp['data']['url'])
return result
# override
async def musicuri(self, _id):
params = self.encrypted_request(dict(ids=[_id], br=32000))
api = "http://music.163.com/weapi/song/enhance/player/url"
jsonresp = await self._asyncPostJson(api, params=params)
result = self._uri(jsonresp['data'][0]['url'])
return result
# override
async def lyric(self, _id):
params = {
"csrf_token": "",
"id": _id,
"lv": -1,
"tv": -1
}
encrypt_data = self.encrypted_request(params)
api = "https://music.163.com/weapi/song/lyric"
jsonresp = await self._asyncPostJson(api, encrypt_data)
        lrc = '[00:01.000] 没有歌词哦~'  # fallback lyric shown when none is found ("no lyrics here~")
try:
lrc = jsonresp['lrc']['lyric']
except:
pass
return lrc
# override
async def songsinList(self, _id, p, n):
# no page, I made pages start from 0
total = (int(p) + 1) * int(n)
params = {
"id" : _id,
"limit" : total,
"n" : total
}
api = "https://music.163.com/weapi/v3/playlist/detail"
params = self.encrypted_request(params)
jsonresp = await self._asyncPostJson(api, params = params)
songscut = jsonresp['playlist']['tracks'][total-int(n):]
result = {'songs':[]}
append = result['songs'].append
try:
for wangyisong in songscut:
append(self._song(
'wangyi',
wangyisong['id'],
wangyisong['id'],
wangyisong['mv'],
wangyisong['al']['picUrl'],
wangyisong['al']['name'],
"/wangyi/lyric/%s" % wangyisong['id'],
wangyisong['name'],
self._getname(wangyisong['ar']),
int(wangyisong['dt']/1000),
self.playable(wangyisong)
))
except:
result['error'] = 1
return result
# override
async def songsinAlbum(self, _id):
api = "https://music.163.com/weapi/v1/album/%s" % _id
jsonresp = await self._asyncPostJson(api, params = self.encrypted_request({}))
result = {'songs':[]}
append = result['songs'].append
try:
for wangyisong in jsonresp['songs']:
append(self._song(
'wangyi',
wangyisong['id'],
wangyisong['id'],
wangyisong['mv'],
wangyisong['al']['picUrl'],
wangyisong['al']['name'],
"/wangyi/lyric/%s" % wangyisong['id'],
wangyisong['name'],
self._getname(wangyisong['ar']),
"",
self.playable(wangyisong)
))
except:
result['error'] = 1
return result
# special
async def getComments(self, _id, t, p, n):
params = {
"offset": int(p) * int(n),
"limit": n,
"rid": _id,
"beforeTime": 0
}
api = "https://music.163.com/weapi/v1/resource/comments/%s" % (self.commentType[t] + _id)
encrypt_data = self.encrypted_request(params)
data = await self._asyncPostJson(api, params = encrypt_data)
# parse data
result = {'hot': {'num': 0, 'comments': []},
'normal': {'num': 0, 'comments': []}}
try:
for comment in data['comments']:
result['normal']['comments'].append(self._comment(
comment['user']['avatarUrl'],
comment['user']['nickname'],
comment['content'],
comment['likedCount'],
self.to_time(int(comment['time']/1000))
))
result['normal']['num'] = data['total']
except:
result['error'] = 1
return result
try:
for comment in data['hotComments']:
result['hot']['comments'].append(self._comment(
comment['user']['avatarUrl'],
comment['user']['nickname'],
comment['content'],
comment['likedCount'],
self.to_time(int(comment['time']/1000))
))
result['hot']['num'] = len(data['hotComments'])
except:
pass
return result
# special
async def userlist(self, user):
searchparams = {
"s":user,
"type":1002
}
searchapi = "http://music.163.com/api/search/pc"
userinfo = await self._asyncPostJson(searchapi, params=searchparams)
uid = userinfo["result"]["userprofiles"][0]["userId"]
# get user's playlist by uid
params = {
"uid": uid,
"limit": 300,
"offset": 0
}
api = "https://music.163.com/weapi/user/playlist"
params = self.encrypted_request(params)
jsonresp = await self._asyncPostJson(api, params = params)
res = {"lists":[]}
for _list in jsonresp['playlist']:
res["lists"].append(self._songlist(
"wangyi",
_list['id'],
_list['name'],
_list['coverImgUrl'],
_list['trackCount']
))
return res
# special
async def picurl(self, _id):
api = "https://music.163.com/weapi/v1/album/%s" % _id
jsonresp = await self._asyncPostJson(api, params = self.encrypted_request({}))
url = "https://i.loli.net/2020/01/31/9yvblCJoiVw1kAX.jpg"
try:
url = jsonresp['album']['picUrl']
except:
pass
return url
``` |
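A hedged end-to-end sketch of the adapter above under asyncio; the search keyword is arbitrary and network access to the NetEase API is assumed.

```python
# Hypothetical usage of the WangYi adapter.
import asyncio

async def demo():
    wy = WangYi()
    result = await wy.searchSong('Beatles', 0, 5)   # keyword, page, page size
    for song in result.get('songs', []):
        print(song['name'], '-', song['artist'], 'playable:', song['playable'])

asyncio.run(demo())
```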
{
"source": "Jindae/github_crawler",
"score": 3
} |
#### File: Jindae/github_crawler/github_crawler.py
```python
import pymongo
import sys, json, traceback
import requests
from time import sleep
from datetime import datetime as dt
import csv
def loadConfig(config_file):
with open(config_file, 'r') as f:
config = json.load(f)
authList = [(name, key) for name, key in config['github.auth']]
return config, authList
def getMongoClient(config):
host = config['mongodb.auth']['host']
port = int(config['mongodb.auth']['port'])
mongo = pymongo.MongoClient(host=host, port=port)
return mongo
def getRateLimit(auth):
r = requests.get('https://api.github.com/rate_limit', auth=auth)
res = r.json()['rate']
return res
def getSleepSec(t):
sleep_sec = (dt.fromtimestamp(t)-dt.now()).total_seconds()
return sleep_sec
def getAvailableAuth(authList):
rates = [{'auth':auth, 'rate':getRateLimit(auth)} for auth in authList]
rates.sort(key=lambda x : x['rate']['reset'])
for x in rates:
sleep_sec = getSleepSec(x['rate']['reset'])
if x['rate']['remaining'] > 0 or sleep_sec < 0:
return x['auth'], 0
sleep_sec = getSleepSec(rates[0]['rate']['reset'])
return rates[0]['auth'], sleep_sec
def printLog(msg):
t = dt.now().strftime("%Y-%m-%dT%H:%M:%SZ")
print(f"[{t}] {msg}")
#Crawling + Storing
def loadRepositories(fname):
with open(fname, 'r') as f:
repos = csv.reader(f)
repos = [(name, repo, status) for name, repo, status in repos]
return repos
def handleReqException(r, authList):
# Handling exceptions. Sleep when rate limit exceeded.
if r.status_code == 404 or r.status_code == 451:
printLog("Error (%d) - %s " % (r.status_code, r.json()['message']))
        return 'break', None, 0
elif r.status_code == 403:
j = r.json()
if 'message' in j and 'limit' in j['message']:
auth, sleep_sec = getAvailableAuth(authList)
if sleep_sec > 0:
printLog(f"Limit exceeded. Sleeping {sleep_sec} seconds.")
sleep(sleep_sec)
else:
printLog(f"Trying alternate credential - {auth[0]}")
return 'continue', auth, sleep_sec
    return 'break', None, 0  # keep the (cmd, auth, sleep_sec) shape the caller unpacks
def parseResponse(response, issues, repo_id):
for res in response:
issue = {
"repo_id": repo_id,
"id": res['id'],
"issue_number": res['number'],
"user_id": res['user']['login'] if 'user' in res else '',
"title": res['title'],
"state": res['state'],
"created_at": res['created_at'],
"closed_at": res['closed_at']
}
issues.append(issue)
def storeData(issues, mongo):
mongo['github'].issues.insert_many(issues)
def getURL(repo_id):
return f"https://api.github.com/repos/{repo_id}/issues"
def main(argv):
config_file = "settings.json" if len(argv) < 2 else argv[2]
printLog(f"Running with {config_file}")
config, authList = loadConfig(config_file)
if config is None or authList is None:
printLog("Cannot load configuration.")
exit(1)
print(authList)
mongo = getMongoClient(config)
repos = loadRepositories(config['repo_file'])
try:
for i in range(len(repos)):
owner, repo, status = repos[i]
if status == 'done':
continue
printLog(f"Collecting issues for {repo}")
auth, sleep_sec = getAvailableAuth(authList)
repo_id = f"{owner}/{repo}"
url = getURL(repo_id)
# Collect all issues.
params = {"page": 1, "per_page": config['per_page']}
issues = []
while True:
r = requests.get(url, params=params, auth=auth)
try:
r.raise_for_status()
response = r.json()
except:
cmd, auth, sleep_sec = handleReqException(r, authList)
if cmd == 'break':
break
elif cmd == 'continue':
continue
parseResponse(response, issues, repo_id)
if len(response) < config['per_page']:
break
params['page'] += 1
storeData(issues, mongo)
repos[i] = (owner, repo, 'done') # Mark finished project.
except Exception:
printLog("Error occurred while collecting data.")
traceback.print_exc()
finally:
with open(config['repo_file'], 'w', newline='') as f:
out = csv.writer(f)
out.writerows(repos)
mongo.close()
if __name__ == "__main__":
main(sys.argv)
```
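For reference, a hedged sketch of the configuration this script expects; the file names and token are placeholders inferred from the keys the script reads, not copied from the repo.

```python
# Hypothetical generator for settings.json; repos.csv holds (owner, repo, status)
# rows, where status is left empty until the crawler marks a project 'done'.
import json

settings = {
    "github.auth": [["bot-account", "<personal-access-token>"]],
    "mongodb.auth": {"host": "localhost", "port": "27017"},
    "repo_file": "repos.csv",
    "per_page": 100,
}
with open("settings.json", "w") as f:
    json.dump(settings, f, indent=2)
```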
#### File: Jindae/github_crawler/rate_limit_info.py
```python
import requests, json
from time import sleep
from datetime import datetime as dt
def getRateLimit(auth):
r = requests.get('https://api.github.com/rate_limit', auth=auth)
res = r.json()['rate']
return res
def loadConfig(config_file):
with open(config_file, 'r') as f:
config = json.load(f)
authList = [(name, key) for name, key in config['github.auth']]
return config, authList
remain = 0
tf = "%Y-%m-%dT%H:%M:%SZ"
config, authList = loadConfig('settings.json')
for auth in authList:
res = getRateLimit(auth)
remain += res['remaining']
print(auth[0], res)
reset_time = dt.fromtimestamp(res['reset'])
t = (reset_time-dt.now())
print(f"Reset time: {reset_time.strftime('%c')}, {t.total_seconds()} seconds remaining.")
``` |
{
"source": "jindal2309/conv-ai-model",
"score": 3
} |
#### File: jindal2309/conv-ai-model/dataloader.py
```python
import numpy as np
import torch
from torch.utils.data import Dataset
class ConvAIDataset(Dataset):
def parse(self):
"""
Note: This parse function expects the no_cands (no candidates) version of the ConvAI2 dataset.
This means that this function would have to be modified for both_ (both personas) and original_ (with multiple candidates) versions.
:return:
"""
with open(self.filename, 'r') as f:
chats = []
lines = f.readlines()
in_persona, in_dialog = False, False
chat = {'dialog':[], 'persona':[]}
            for line in lines[:20]:  # note: only the first 20 lines are parsed here (looks like a debugging limit)
line = line.strip()
if len(line) == 0:
continue
# if next persona has started, add current set to list
if in_persona and in_dialog and 'your persona: ' in line:
# add curr to data
chats.append(chat)
chat = {'dialog':[], 'persona':[]}
in_persona, in_dialog = False, False
if 'your persona: ' in line:
in_persona = True
text = line.split('your persona: ')[1]
chat['persona'].append(text)
else:
in_dialog = True
idx = line.find(' ') + 1
text = line[idx:]
text = text.split('\t')
text = [t.strip() for t in text]
chat['dialog'].extend(text)
# add the last set
chats.append(chat)
return chats
def convert_to_bpe(self, data, bpe_vocab):
bpe_data = []
for chat in data:
dialog, persona = chat['dialog'], chat['persona']
dialog_toks = [bpe_vocab.string2ids(d) for d in dialog]
persona_toks = [bpe_vocab.string2ids(p) for p in persona]
# every input should have a response, so remove last one if number of utterances is odd
if len(dialog_toks) % 2 == 1:
dialog_toks = dialog_toks[:-1]
bpe_data.append((dialog_toks, persona_toks))
return bpe_data
def __init__(self, filename, max_seq_len, bpe_vocab):
self.filename = filename
self.max_seq_len = max_seq_len
self.vocab = bpe_vocab
chats = self.parse()
self.data = self.convert_to_bpe(chats, self.vocab)
def __getitem__(self, index):
dialog, persona = self.data[index]
persona = sum(persona, [])
persona = [self.vocab.info_bos_id] + persona[:self.max_seq_len-2] + [self.vocab.info_eos_id]
x = []
for i, toks in enumerate(dialog[:-1], 1):
if i % 2 == 1:
toks = [self.vocab.talker1_bos_id] + toks + [self.vocab.talker1_eos_id]
else:
toks = [self.vocab.talker2_bos_id] + toks + [self.vocab.talker2_eos_id]
x.extend(toks)
x = x[-self.max_seq_len:]
y = [self.vocab.bos_id] + dialog[-1] + [self.vocab.eos_id]
y = y[:self.max_seq_len]
return x, y, persona
def __len__(self):
return len(self.data)
```
#### File: conv-ai-model/transformer/tc_transformer.py
```python
import torch
from torch import nn
import torch.nn.functional as F
from transformer.transformer_block import TransformerBlock
class TCTransformer(nn.Module):
def __init__(self, emb_dim, heads, depth, seq_length, num_tokens, num_classes, dropout=0.0):
super().__init__()
self.num_tokens = num_tokens
self.token_emb = nn.Embedding(num_embeddings=num_tokens, embedding_dim=emb_dim)
self.pos_emb = nn.Embedding(num_embeddings=seq_length, embedding_dim=emb_dim) # position_encoding can be used instead of embedding
self.dropout = nn.Dropout(dropout)
# Sequence of transformer blocks that does the heavy lifting
trans_blocks = []
for i in range(depth):
trans_blocks.append(TransformerBlock(emb_dim=emb_dim, heads=heads))
self.trans_blocks = nn.Sequential(*trans_blocks)
# Maps final output sequence to class logits
self.to_probs = nn.Linear(emb_dim, num_classes)
def forward(self, x):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
"""
:param x: A (b, t) tensor of integer values representing
words (in some predetermined vocabulary).
:return: A (b, c) tensor of log-probabilities over the
classes (where c is the nr. of classes).
"""
# generate token embeddings
tokens = self.token_emb(x)
b, t, k = tokens.size()
# generate position embeddings
positions = torch.arange(t, device=device)
positions = self.pos_emb(positions)[None, :, :].expand(b, t, k)
x = tokens + positions
x = self.dropout(x)
x = self.trans_blocks(x)
# Avg pool over the t dimension and project to class probs
x = self.to_probs(x.mean(dim=1))
out = F.log_softmax(x, dim=1)
return out
``` |
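A minimal usage sketch for the classifier above — the hyperparameters, the `TransformerBlock(emb_dim, heads)` signature, and the import path (taken from the file header) are assumptions for illustration only:
```python
import torch
from transformer.tc_transformer import TCTransformer  # assumed path, per the file header above

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Hypothetical hyperparameters, chosen only to show the expected shapes.
model = TCTransformer(emb_dim=128, heads=8, depth=4, seq_length=256,
                      num_tokens=10000, num_classes=2, dropout=0.1).to(device)
x = torch.randint(0, 10000, (4, 256), device=device)  # (batch, seq_len) integer token ids
log_probs = model(x)                                   # (batch, num_classes) log-probabilities
print(log_probs.shape)                                 # torch.Size([4, 2])
```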
{
"source": "jindalvi/meeting",
"score": 2
} |
#### File: meeting/common/models.py
```python
from django.db import models
from django.contrib.auth.models import *
from django.utils import timezone
from meeting.common.managers import *
class BaseModel(models.Model):
class Meta:
abstract = True
objects = BaseModelManager()
created_by = models.ForeignKey(User,
related_name='%(app_label)s_%(class)s_creator',
on_delete=models.CASCADE)
created_on = models.DateTimeField(auto_now_add=True)
modified_by = models.ForeignKey(User,
related_name='%(app_label)s_%(class)s_modifier',
on_delete=models.CASCADE)
modified_on = models.DateTimeField(auto_now=True)
deleted_by = models.ForeignKey(User, null=True, blank=True,
related_name='%(app_label)s_%(class)s_deleter',
on_delete=models.CASCADE)
deleted_on = models.DateTimeField(null=True, blank=True)
is_deleted = models.BooleanField(default=False)
def save(self, *args, **kwargs):
if not self.id or not self.created_on:
self.created_on = timezone.now()
return super(BaseModel, self).save(*args, **kwargs)
class Country(models.Model):
code = models.CharField(max_length=2)
title = models.CharField(max_length=128)
is_active = models.BooleanField(default=True)
def __str__(self):
return self.title
class Gender(models.Model):
title = models.CharField(max_length=128)
is_active = models.BooleanField(default=True)
def __str__(self):
return self.title
```
#### File: common/templatetags/app_tags.py
```python
import re
from django import template
from meeting.common.models import *
register = template.Library()
@register.filter
def qs_to_css(qs, key):
css = list()
if qs or len(qs):
for item in qs:
css.append(str(item.__dict__.get(key)))
return ', '.join(css)
else:
return 'None'
@register.filter
def qs_to_css_stripped(qs, key):
css = list()
if qs or len(qs):
for item in qs:
css.append(str(item.__dict__.get(key)))
return ','.join(css)
else:
return 'None'
@register.filter
def firstcharacter(string):
return string[0]
@register.filter
def is_role_deleteable(role):
if EmployeeRole.objects.filter(role=role).exists():
return False
else:
return True
@register.filter
def is_allowed(employee, permission_code):
try:
roles = employee.employeemetadata.roles
employee_groups = employee.employeemetadata.groups.all()
permission_groups = set([y for x in roles for y in x.permission_groups.all()] + [y for x in employee_groups for y in x.permission_groups.all()])
module_permission = Permission.objects.get(permission_code=permission_code)
containing_groups = set([x for x in PermissionGroup.objects.filter(permissions=module_permission)])
if permission_groups.intersection(containing_groups):
return True
else:
return False
except Exception as e:
return False
@register.filter
def float_to_percent(value):
if type(value) == float:
return '{:.2f}'.format(round((value * 100), 2))
else:
return 0.0
@register.filter
def absolute_value(value, precision=2):
try:
float(value)
return format(value, '.{0}f'.format(precision))
except Exception as e:
return value
@register.filter
def subtract(x, y):
try:
x = float(x)
y = float(y)
result = x - y
if result.is_integer():
result = int(result)
return result
except Exception as e:
return x
@register.filter
def add(x, y):
try:
x = float(x)
y = float(y)
result = x + y
if result.is_integer():
result = int(result)
return result
except Exception as e:
return x
@register.filter
def is_future_date(value):
today = timezone.now().date()
if value > today:
return True
return False
@register.filter
def is_date_from_current_month_year(value):
today = timezone.now().date()
if value.month != today.month or value.year != today.year:
return False
return True
@register.filter
def previous_year(value):
try:
print(type(value))
return int(value) - 1
except Exception as e:
return value
@register.filter
def next_year(value):
try:
print(type(value))
return int(value) + 1
except Exception as e:
return value
@register.filter
def dictionary_has_key(dictionary, key):
if dictionary:
if dictionary.get(key, None):
return True
return False
else:
return False
@register.filter
def value_from_dictionary(dictionary, key):
if dictionary:
return dictionary.get(key, None)
return None
```
#### File: meeting/customer/models.py
```python
from django.db import models
from tenant_schemas.models import TenantMixin
from meeting.common.utils import *
class Tenant(TenantMixin):
name = models.CharField(max_length=128)
slug = models.CharField(max_length=128)
paid_until = models.DateField()
on_trial = models.BooleanField(default=True)
created_on = models.DateField(auto_now_add=True)
logo = ContentTypeRestrictedFileField(upload_to=upload_to,
max_length=512, null=True, blank=True)
testing_email = models.CharField(max_length=255,
null=True, blank=True)
activate_emails = models.BooleanField(default=False)
testing_mode = models.BooleanField(default=False)
setup_completed = models.BooleanField(default=False)
auto_create_schema = True
auto_drop_schema = True
def __str__(self):
return self.name
def save(self, *args, **kwargs):
unique_slugify(self, self.name)
super(Tenant, self).save(*args, **kwargs)
```
#### File: meeting/customer/setup.py
```python
import os
import re
import json
import uuid
import base64
from meeting.customer.models import *
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from django.core.files.base import ContentFile
from tenant_schemas.utils import tenant_context
from django.core.management import call_command
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.auth.models import User
def codify(label):
code = re.sub('[^a-zA-Z0-9 \n\.]', '', label)
code = '_'.join(code.split(' '))
return code.lower()
def setup_public_tenant():
'''
*setup_public_tenant* is a utility to function to populate system with one public tenant in either environments.
Args:
It takes no argument
Return:
*setup_public_tenant* does not return anything
'''
call_command('makemigrations', 'customer')
call_command('migrate_schemas', '--shared')
if settings.SETTINGS_MODULE == 'config.settings.local':
tenant = Tenant.objects.create(domain_url='meeting.localhost', schema_name='public', name='Meeting', setup_completed=True, paid_until=timezone.now().date())
user, c = User.objects.get_or_create(username=settings.SYSTEM_ADMIN, email=settings.SYSTEM_ADMIN, first_name='System', last_name='Admin', is_staff=True, is_superuser=True)
user.set_password('<PASSWORD>')
user.save()
def setup_new_tenant(tenant_data, user_data):
tenant_data.update({
'subscription_mode': 'T',
'subscribed_on': timezone.now(),
'subscription_ends_on': timezone.now() + timedelta(days=7)
})
data = {
'name':'testing',
'schema_name':'tenant',
'setup_completed':True,
'domain_url':'testing.meeting.localhost',
'paid_until':timezone.now().date()
}
tenant = Tenant.objects.create(**tenant_data)
call_command('makemigrations', 'common')
call_command('migrate_schemas', '--tenant')
user_data = {
'username': '<EMAIL>',
'email': '<EMAIL>',
'first_name': 'Vikas',
'last_name': 'Jindal'
}
with tenant_context(tenant):
password = user_data.pop('password', '<PASSWORD>')
user = User.objects.create(**user_data)
user.set_password(password)
user.save()
print('Here we go!!')
print('Providing initial data via loaddata')
loaddata()
print('Completed importing initial data.')
print('**********************************')
print('\n\n')
tenant.setup_completed = True
tenant.save()
print('Exiting setting up new tenant utility function. Enjoy!')
``` |
{
"source": "Jindam/HPCGISLab",
"score": 2
} |
#### File: pcml/core/Decomposition.py
```python
from .Layer import *
from .Subdomain import *
import math
# Defines the number of subdomains (e.g., rows) to decompose a single layer into.
numchunksgoal=2
numchunksgoal=16
def globalpointlistdecomposition(layer, buffersize):
# Row decomposition supports pointlist only for globalclass operations
if layer.data_structure==Datastructure.pointlist:
if buffersize>=0: # Then it is not globalclass operation
raise PCMLNotSupported("Currently globalpointlistdecomposition only supports globalclass+pointlist")
# If this layer is a pointlist, then it is assumed to be global operation
# so just copy layer information and duplicate the subdomain
subdomain = Subdomain(layer.y, layer.x, layer.h, layer.w, layer.title+" subdomain pointlist")
subdomain.set_pointlist(layer.get_pointlist())
subdomainlist=[]
for sdind in xrange(numsubdomains):
subdomainlist.append(subdomain)
return subdomainlist
else:
raise PCMLNotSupported("globalpointlistdecomposition only supports pointlist datastructures")
# Take a layer and return a list of subdomains
def rowdecomposition(layer, buffersize):
print "Row decomposition"
# Row decomposition supports pointlist only for globalclass operations
if layer.data_structure==Datastructure.pointlist:
globalpointlistdecomposition(layer,buffersize)
assert(layer.data_structure==Datastructure.array)
# If global then buffer size is infinite as all subdomains will have all data
if buffersize<0: # This indicates the buffer should be infinite sized (global/zonal operation)
buffersize=9999999999999
# FIXME: I should do the same global subdomain as pointlist here
# Sanity check nrows and ncols
# FIXME: In the future this check will happen in operation._decompositioninit
assert(layer.nrows!=None)
assert(layer.ncols!=None)
subdomainlist = []
# Number of rows per subdomain given a number of chunks goal (numchunksgoal)
rowspersubdomain = int(math.ceil(float(layer.nrows)/float(numchunksgoal)))
# Number of subdomains to create when given rowspersubdomain
numsubdomains = int(math.ceil(float(layer.nrows)/float(rowspersubdomain)))
# For each subdomain indexed by sdind, calculate the size
for sdind in xrange(numsubdomains):
# First row in the subdomain
r = rowspersubdomain*sdind
# Default number of rows for this subdomain
nrows = rowspersubdomain # Number of rows for this subdomain
if buffersize>0: # If we have a buffer (e.g., focal operation), then add the buffer
# A buffer will generally reduce r by buffersize and increase nrows by buffersize*2
# However, r and r+nrows must be contained within the range 0-layer.nrows
new_r=max(0,r-buffersize) # Calculate new r value making sure it is not negative
new_h=min(layer.nrows,r+nrows+buffersize) # calculate new height making sure it is <= layer.nrows
# Replace original r and nrows with new values
nrows=new_h-new_r
r=new_r
print "new_r",new_r,"new_h",new_h
else: # Ensure that we don't allocate more rows past the number of layer rows
nrows=min(layer.nrows-r,nrows)
# Sanity check
print "r",r,"nrows",nrows,"layer.nrows",layer.nrows
assert(r+nrows<=layer.nrows)
# In row decomposition, column index is always 0 and ncols never changes
c = 0
ncols = layer.ncols
# Now derive y, x, h, w
y = layer.y + r * layer.cellsize
h = nrows * layer.cellsize
# In row decomposition: x and w always remain the same
x = layer.x
w = layer.w
# Create a subdomain and populate it with the correct attribute values
subdomain = Subdomain(y, x, h, w, layer.title+" subdomain "+str(sdind))
subdomain.cellsize=layer.cellsize
subdomain.nodata_value=layer.nodata_value
subdomain.r=r
subdomain.c=c
subdomain.nrows=nrows
subdomain.ncols=ncols
# Extract an array slice (reference to data in a layer for lower memory overhead)
# from the layer and set the data reference for the subdomain to use
arrslice=layer.slice_nparray(r,0,nrows,ncols)
subdomain.set_data_ref(arrslice)
# Add the subdomain to the list
subdomainlist.append(subdomain)
return subdomainlist
# Take a layer and return a list of subdomains
def columndecomposition(layer, buffersize):
print "Column decomposition"
# Col decomposition supports pointlist only for globalclass operations
if layer.data_structure==Datastructure.pointlist:
globalpointlistdecomposition(layer,buffersize)
assert(layer.data_structure==Datastructure.array)
# If global then buffer size is infinite as all subdomains will have all data
if buffersize<0: # This indicates the buffer should be infinite sized (global/zonal operation)
buffersize=9999999999999
# FIXME: I should do the same global subdomain as pointlist here
# Sanity check nrows and ncols
# FIXME: In the future this check will happen in operation._decompositioninit
assert(layer.nrows!=None)
assert(layer.ncols!=None)
subdomainlist = []
# Number of columns per subdomain given a number of chunks goal (numchunksgoal)
colspersubdomain = int(math.ceil(float(layer.ncols)/float(numchunksgoal)))
# Number of subdomains to create when given colspersubdomain
numsubdomains = int(math.ceil(float(layer.ncols)/float(colspersubdomain)))
# For each subdomain indexed by sdind, calculate the size
for sdind in xrange(numsubdomains):
# First col in the subdomain
c = colspersubdomain*sdind
# Default number of columns for this subdomain
ncols = colspersubdomain # Number of columns for this subdomain
if buffersize>0: # If we have a buffer (e.g., focal operation), then add the buffer
# A buffer will generally reduce c by buffersize and increase ncols by buffersize*2
# However, c and c+ncols must be contained within the range 0-layer.ncols
new_c=max(0,c-buffersize) # Calculate new c value making sure it is not negative
new_w=min(layer.ncols,c+ncols+buffersize) # calculate new width making sure it is <= layer.ncols
# Replace original c and ncols with new values
ncols=new_w-new_c
c=new_c
# Sanity check
assert(c+ncols<=layer.ncols)
# In column decomposition, row index is always 0 and nrows never changes
r = 0
nrows = layer.nrows
# Now derive y, x, h, w
x = layer.x + c * layer.cellsize
w = ncols * layer.cellsize
# In column decomposition: y and h always remain the same
y = layer.y
h = layer.h
# Create a subdomain and populate it with the correct attribute values
subdomain = Subdomain(y, x, h, w, layer.title+" subdomain "+str(sdind))
subdomain.cellsize=layer.cellsize
subdomain.nodata_value=layer.nodata_value
subdomain.r=r
subdomain.c=c
subdomain.nrows=nrows
subdomain.ncols=ncols
# Extract an array slice (reference to data in a layer for lower memory overhead)
# from the layer and set the data reference for the subdomain to use
arrslice=layer.slice_nparray(0,c,nrows,ncols)
subdomain.set_data_ref(arrslice)
# Add the subdomain to the list
subdomainlist.append(subdomain)
return subdomainlist
```
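The buffered row arithmetic above is easy to check in isolation. The following self-contained sketch (independent of the Layer/Subdomain classes, with made-up numbers) reproduces the `(r, nrows)` ranges that `rowdecomposition` would compute:
```python
import math

def row_ranges(nrows, numchunksgoal, buffersize):
    """Return (first_row, num_rows) per subdomain, clamped to [0, nrows)."""
    rowspersubdomain = int(math.ceil(float(nrows) / numchunksgoal))
    numsubdomains = int(math.ceil(float(nrows) / rowspersubdomain))
    ranges = []
    for sdind in range(numsubdomains):
        r = rowspersubdomain * sdind
        n = rowspersubdomain
        if buffersize > 0:
            new_r = max(0, r - buffersize)           # clamp the buffered start at 0
            new_h = min(nrows, r + n + buffersize)   # clamp the buffered end at nrows
            r, n = new_r, new_h - new_r
        else:
            n = min(nrows - r, n)                    # do not run past the last row
        ranges.append((r, n))
    return ranges

print(row_ranges(nrows=10, numchunksgoal=4, buffersize=1))
# [(0, 4), (2, 5), (5, 5), (8, 2)] -- buffered subdomains overlap at their edges
```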
#### File: pcml/core/Operation.py
```python
from ..util.Messaging import *
from .Decomposition import *
from .BoundingBox import *
from abc import ABCMeta, abstractmethod
class OpClass():
""" Enumeration class. Classifies operations as local, focal, zonal, or global.
"""
localclass = 1
focalclass = 2
zonalclass = 3
globalclass = 4
class Operation(object):
__metaclass__ = ABCMeta
def __init__(self,name,*args,**kwargs):
"""Operations are applied to layers.
Args:
:param name (string): String representation of Operation name
:param layers (tuple): Tuple of layers to apply operation on
:param opclass (OpClass): Operation classification (local, focal, zonal, global)
"""
# Derive class name from operation name
self.name = name
PCMLTODO("Only row decomposition method supported, so hardcoding for now")
#self.decomposition_method=DecompositionMethod.row
_layerstuple = kwargs.get('layers', None)
if _layerstuple!=None:
self._layers = list(_layerstuple)
self.opclass = kwargs.get('opclass', OpClass.localclass)
self.buffersize = kwargs.get('buffersize', 0)
self.decomposition = kwargs.get('decomposition',rowdecomposition) # By default use row decomposition
if self.opclass==OpClass.localclass and self.buffersize != 0:
raise PCMLOperationError("Buffersize should be 0 for localclass currently %s" % self.buffersize)
#If zonal operation we want the entire layer data
if self.opclass==OpClass.zonalclass:
self.buffersize=999999999999
def __repr__(self):
return "<Operation: %s : %i layers>" % (self.name,len(self._layers))
def getOutputLayers(self):
PCMLTODO("Need to support more than one output layer")
return self._layers[0]
def _decompositioninit(self):
# Duplicate a layer to create an output layer with the correct dimensions
# Get the first layer
firstlayer=self._layers[0]
# The output layer is a duplicate of the first layer
outputlayer=firstlayer.duplicate()
outputlayer.title="Output for operation %s"%self.name
self._layers.insert(0, outputlayer) # Add the output layer to the front of the layers list
# By default we use rowdecomposition as our decomposition method
# Users may override decomposition with any other method they would like
#def decomposition(self,layer,buffersize):
# return rowdecomposition(layer,buffersize)
def _decompositionrun(self):
""" Divides the :member:_layers into subdomains for further processing.
The decomposition method is defined by :member:`decompositionmethod`.
You can also define your own decomposition algorithm by overriding this method.
"""
PCMLTODO("Need to support multiple output layers, this can be done by overriding decomposition and inserting multiple output layers")
listofsubdomains = []
self._decompositioninit()
# The output layer is the first layer in the layers list (self.layers[0])
# Decompose it with a 0 buffer
#listofsubdomains.append(self._layers[0].decomposition(self.decomposition_method, 0))
listofsubdomains.append(self.decomposition(self._layers[0], 0))
for layer in self._layers:
if layer != self._layers[0]: # Skip the output layer, because it was already decomposed and added
#listofsubdomains.append(layer.decomposition(self.decomposition_method, self.buffersize)) # buffer size is set based on classification (L,F,Z,G)
listofsubdomains.append(self.decomposition(layer, self.buffersize)) # buffer size is set based on classification (L,F,Z,G)
# The listofsubdomains is inverted using zip and map to create a list of lists
# so that each subdomain is grouped with the corresponding subdomain from each layer (see example below)
subdomainlists = map(list,zip(*listofsubdomains))
# listofsubdomains = ( (layer1subdomain1 , layer1subdomain2) , (layer2subdomain1 , layer2subdomain2) )
# subdomainlists = ( (layer1subdomain1 , layer2subdomain1) , (layer1subdomain2 , layer2subdomain2) )
return subdomainlists
def executor(self,subdomains):
""" Executor handles processing of the function by iterating over locations in a subdomain
:return: #TODO: Undefined return value.
"""
PCMLTODO("executor assumes single subdomain as output, which is not universal for all operations")
outsubdomain = subdomains.pop(0)
outarr = outsubdomain.get_nparray()
if outsubdomain.data_structure!=Datastructure.array:
print "datatype",outsubdomain.data_type,"arraydt",Datastructure.array
PCMLNotSupported("Executor currently assumes an array data structure")
PCMLTODO("Sanity check subdomains are all the same dimensions")
# Iterate over locations in the outsubdomain and apply function to each location
#for locind in outsubdomain:
for loc in outsubdomain:
l = [] # Create an empty list to store locations
for sd in subdomains:
if sd.data_structure!=Datastructure.array: # Skip non array subdomains
continue
# Get a location in this subdomain with same coordinates as locind
locv=sd.get_locval(loc)
l.append(locv) # append to list of locations
val = self.function(l,subdomains) # Apply function to all locations
outarr[loc['r']-outsubdomain.r][loc['c']-outsubdomain.c]=val # Set val to outarr at locind
def function(self,locations,subdomains):
raise PCMLOperationError("Operation function is not implemented")
```
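A tiny illustration of the `zip`/`map` regrouping described in `_decompositionrun`, with strings standing in for real subdomains:
```python
listofsubdomains = [("layer1sd1", "layer1sd2"), ("layer2sd1", "layer2sd2")]
subdomainlists = list(map(list, zip(*listofsubdomains)))
print(subdomainlists)
# [['layer1sd1', 'layer2sd1'], ['layer1sd2', 'layer2sd2']]
```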
#### File: pcml/lib/LocalOperationExecutors.py
```python
from ..core.Operation import *
from ..core.Scheduler import *
from ..util.OperationBuilder import *
import numpy as np
import types
import math
@executor
@localoperation
def LocalSum_np(self, subdomains):
# NOTE: Assumes 3 subdomains, first is output, second and third should be added
# Get the array from the output subdomain
outsubdomain = subdomains[0]
outarr = outsubdomain.get_nparray()
# Apply numpy operation to arrays from second and third subdomains
arr=np.add(subdomains[1].get_nparray(),subdomains[2].get_nparray())
# Copy values to outarr (outsubdomain)
outarr[:,:]=arr
# Notice we don't need to return anything, because the resulting array (arr) is copied to outsubdomain through outarr
@executor
@localoperation
def LocalMult_np(self, subdomains):
# NOTE: Assumes 3 subdomains, first is output, second and third should be added
# Get the array from the output subdomain
outsubdomain = subdomains[0]
outarr = outsubdomain.get_nparray()
# Apply numpy operation to arrays from second and third subdomains
arr=np.multiply(subdomains[1].get_nparray(),subdomains[2].get_nparray())
# Copy values to outarr (outsubdomain)
outarr[:,:]=arr
@executor
@localoperation
def LocalMaximum_np(self, subdomains):
#finding Local Maximum among the given locations
outsubdomain = subdomains[0]
outarr = outsubdomain.get_nparray()
arr= np.maximum(subdomains[1].get_nparray(),subdomains[2].get_nparray())
outarr[:,:]=arr
@executor
@localoperation
#finding Local Minimum among the given locations
def LocalMinimum_np(self, subdomains):
outsubdomain = subdomains[0]
outarr = outsubdomain.get_nparray()
arr = np.minimum(subdomains[1].get_nparray(),subdomains[2].get_nparray())
outarr[:,:]=arr
@executor
@localoperation
#finding Local Mean among the given locations
def LocalMean_np(self, subdomains):
outsubdomain = subdomains[0]
outarr = outsubdomain.get_nparray()
arr=np.add(subdomains[1].get_nparray(),subdomains[2].get_nparray())
denom=len(subdomains)-1
outarr[:,:]=arr/denom
@executor
@localoperation
#finding Local Difference among the given locations
def LocalDifference_np(self, subdomains):
outsubdomain = subdomains[0]
outarr = outsubdomain.get_nparray()
arr1=np.array(subdomains[1].get_nparray())
arr2=np.array(subdomains[2].get_nparray())
arr=arr1-arr2
outarr[:,:]=arr
@executor
@localoperation
#finding Local Product among the given locations
def LocalProduct_np(self, subdomains):
outsubdomain = subdomains[0]
outarr = outsubdomain.get_nparray()
arr=np.multiply(subdomains[1].get_nparray(),subdomains[2].get_nparray())
outarr[:,:]=arr
``` |
{
"source": "jinde-liu/deeplabv3-_occ5000",
"score": 2
} |
#### File: deeplabv3-_occ5000/utils/saver.py
```python
import os
import shutil
import torch
from collections import OrderedDict
import glob
import time
class Saver(object):
# Create the save directory, named with the current timestamp
def __init__(self, args):
self.args = args
self.directory = os.path.join('run', args.dataset, args.checkname)
self.runs = sorted(glob.glob(os.path.join(self.directory, '*')))
run_id = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
self.experiment_dir = os.path.join(self.directory, run_id)
if not os.path.exists(self.experiment_dir):
os.makedirs(self.experiment_dir)
def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar'):
"""Saves checkpoint to disk and copy it to ./run dir if it is the best
state: state dict
is_best: if current checkpoint is best copy it to ./run
filename: checkpoint file name
"""
filename = os.path.join(self.experiment_dir, filename)
torch.save(state, filename)
if is_best:
best_pred = state['best_pred']
epoch = state['epoch']
class_miou = state['class_miou']
with open(os.path.join(self.experiment_dir, 'best_pred.txt'), 'w') as f:
f.write(str(best_pred) + '\n')
f.write('background: {:.2%}\n'.format(class_miou[0]))
f.write('hair: {:.2%}\n'.format(class_miou[1]))
f.write('face: {:.2%}\n'.format(class_miou[2]))
f.write('torso: {:.2%}\n'.format(class_miou[3]))
f.write('left_arm: {:.2%}\n'.format(class_miou[4]))
f.write('right_arm: {:.2%}\n'.format(class_miou[5]))
f.write('left_hand: {:.2%}\n'.format(class_miou[6]))
f.write('right_hand: {:.2%}\n'.format(class_miou[7]))
f.write('left_leg: {:.2%}\n'.format(class_miou[8]))
f.write('right_leg: {:.2%}\n'.format(class_miou[9]))
f.write('left_foot: {:.2%}\n'.format(class_miou[10]))
f.write('right_foot: {:.2%}\n'.format(class_miou[11]))
f.write('accessory: {:.2%}\n'.format(class_miou[12]))
if self.runs:
previous_miou = [0.0]
for run in self.runs:
path = os.path.join(run, 'best_pred.txt')
if os.path.exists(path):
with open(path, 'r') as f:
miou = float(f.readline())
previous_miou.append(miou)
else:
continue
max_miou = max(previous_miou)
if best_pred > max_miou:
shutil.copyfile(filename, os.path.join(self.directory, str(epoch)+'-'+str(best_pred)+'-model_best.pth.tar'))
else:
shutil.copyfile(filename, os.path.join(self.directory, str(epoch)+'-'+str(best_pred)+'-model_best.pth.tar'))
# save parameters in 'parameter.txt' --kidd
def save_experiment_config(self):
logfile = os.path.join(self.experiment_dir, 'parameters.txt')
log_file = open(logfile, 'w')
p = OrderedDict()
p['dataset'] = self.args.dataset
p['backbone'] = self.args.backbone
p['out_stride'] = self.args.out_stride
p['lr'] = self.args.lr
p['lr_scheduler'] = self.args.lr_scheduler
p['loss_type'] = self.args.loss_type
p['epoch'] = self.args.epochs
p['base_size'] = self.args.base_size
p['crop_size'] = self.args.crop_size
for key, val in p.items():
log_file.write(key + ':' + str(val) + '\n')
log_file.close()
``` |
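A minimal sketch of how the saver might be driven, assuming `args` carries the attributes read above; all values are placeholders:
```python
from types import SimpleNamespace

args = SimpleNamespace(dataset='occ5000', checkname='deeplab_v3+', backbone='resnet',
                       out_stride=16, lr=0.007, lr_scheduler='poly', loss_type='ce',
                       epochs=50, base_size=513, crop_size=513)
saver = Saver(args)             # creates run/occ5000/deeplab_v3+/<timestamp>/
saver.save_experiment_config()  # writes parameters.txt into that directory
```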
{
"source": "jinde-liu/HKSL",
"score": 2
} |
#### File: jinde-liu/HKSL/inference_single_image.py
```python
from PIL import Image
import numpy as np
import torch
import torchvision.transforms as tr
from modeling.deeplab import DeepLab
from dataloaders.utils import decode_segmap
from utils.metrics import Evaluator
import matplotlib.pyplot as plt
import torch.nn as nn
from args import Args_occ5000
import os
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
checkpoint = torch.load(
"/home/kidd/kidd1/pytorch-deeplab-xception/run/occ5000/deeplab_v3+_noflip/50-0.7067296461287129-model_best.pth.tar")
checkpoint2 = torch.load(
'/home/kidd/kidd1/HKSL/run/occ5000/deeplab_v3+_noflip_kinematic/50-0.7107144180176619-model_best.pth.tar'
)
args = Args_occ5000()
args2 = Args_occ5000()
args2.use_kinematic = True
model = DeepLab(
args=args,
num_classes=13,
backbone='resnet',
output_stride=16,
sync_bn=True,
freeze_bn=False)
model2 = DeepLab(
args=args2,
num_classes=13,
backbone='resnet',
output_stride=16,
sync_bn=True,
freeze_bn=False)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
model.to(device)
torch.set_grad_enabled(False)
model2.load_state_dict(checkpoint2['state_dict'])
model2.eval()
model2.to(device)
torch.set_grad_enabled(False)
def transform(image):
return tr.Compose([
tr.ToTensor(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])(image)
# Read eval image and gt
dataset_base = '/home/kidd/kidd1/Occ5000'
im_list = []
ann_list = []
with open(dataset_base + '/list/val_all2500.txt', 'r') as f:
lines = f.read().splitlines()
for line in lines:
im_path = line[0:line.find('.png') + 4]
ann_path = line[line.find('.png') + 5:]
assert os.path.isfile(dataset_base + im_path)
assert os.path.isfile(dataset_base + ann_path)
im_list.append(dataset_base + im_path)
ann_list.append(dataset_base + ann_path)
assert (len(im_list) == len(ann_list)), 'number not same in im and ann!'
print('Number of images in {}:{:d}'.format('val', len(im_list)))
for i in range(len(im_list)):
if i % 24 == 0:
print('processed %d images'%i)
image = Image.open(im_list[i])
gt_im = Image.open(ann_list[i])
gt = np.array(gt_im)
gt_rgb = decode_segmap(gt, dataset="occ5000")
# Inference and set the visual color map
inputs = transform(image).to(device)
output = model(inputs.unsqueeze(0)).squeeze().cpu().numpy()
output2 = model2(inputs.unsqueeze(0)).squeeze().cpu().numpy()
pred = np.argmax(output, axis=0)
pred_rgb = decode_segmap(pred, dataset="occ5000")
pred2 = np.argmax(output2, axis=0)
pred_rgb2 = decode_segmap(pred2, dataset="occ5000")
fig = plt.figure()
plt.subplot(1, 4, 1)
plt.imshow(image)
plt.axis('off')
plt.subplot(1, 4, 2)
plt.imshow(gt_rgb)
plt.axis('off')
plt.subplot(1, 4, 3)
plt.imshow(pred_rgb)
plt.axis('off')
plt.subplot(1, 4, 4)
plt.imshow(pred_rgb2)
plt.axis('off')
plt.savefig('/home/kidd/kidd1/HKSL/run/occ5000/deeplab_v3+_noflip_kinematic/results_images/' + str(i) + '.png')
#plt.show()
plt.close(fig)
# eval = Evaluator(13)
# eval.reset()
# eval.add_batch(gt, pred)
# miou = eval.Mean_Intersection_over_Union()
# print(miou)
# class_miou = eval.Class_Intersection_over_Union()
# print(class_miou)
```
#### File: HKSL/modeling/deeplab.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from modeling.aspp import build_aspp
from modeling.decoder import build_decoder_kinematic, build_decoder
from modeling.backbone import build_backbone
from modeling.kinematic_graph import build_kinematic_graph
class DeepLab(nn.Module):
def __init__(self, args, backbone='resnet', output_stride=16, num_classes=21,
sync_bn=True, freeze_bn=False):
super(DeepLab, self).__init__()
self.args = args
if backbone == 'drn':
output_stride = 8
if sync_bn == True:
BatchNorm = SynchronizedBatchNorm2d
else:
BatchNorm = nn.BatchNorm2d
self.backbone = build_backbone(backbone, output_stride, BatchNorm)
self.aspp = build_aspp(backbone, output_stride, BatchNorm)
if self.args.use_kinematic == False:
self.decoder = build_decoder(num_classes, backbone, BatchNorm)
else:
self.decoder = build_decoder_kinematic(backbone, BatchNorm)
self.kinematic_layer = build_kinematic_graph(BatchNorm)
self.freeze_bn = freeze_bn
def forward(self, input):
x, low_level_feat = self.backbone(input)
x = self.aspp(x)
x = self.decoder(x, low_level_feat)
if not self.args.use_kinematic:
x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
else:
x = self.kinematic_layer(x)
x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def freeze_bn(self):
for m in self.modules():
if isinstance(m, SynchronizedBatchNorm2d):
m.eval()
elif isinstance(m, nn.BatchNorm2d):
m.eval()
def get_1x_lr_params(self):
modules = [self.backbone]
for i in range(len(modules)):
for m in modules[i].named_modules():
if self.freeze_bn:
if isinstance(m[1], nn.Conv2d):
for p in m[1].parameters():
if p.requires_grad:
yield p
else:
if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \
or isinstance(m[1], nn.BatchNorm2d):
for p in m[1].parameters():
if p.requires_grad:
yield p
def get_10x_lr_params(self):
if self.args.use_kinematic:
modules = [self.aspp, self.decoder, self.kinematic_layer]
elif not self.args.use_kinematic:
modules = [self.aspp, self.decoder]
for i in range(len(modules)):
for m in modules[i].named_modules():
if self.freeze_bn:
if isinstance(m[1], nn.Conv2d):
for p in m[1].parameters():
if p.requires_grad:
yield p
else:
if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \
or isinstance(m[1], nn.BatchNorm2d):
for p in m[1].parameters():
if p.requires_grad:
yield p
if __name__ == "__main__":
from args import Args_occ5000
from tensorboardX import SummaryWriter
writer = SummaryWriter('/home/kidd/Documents/graph1')
args = Args_occ5000()
model = DeepLab(args=args, backbone='resnet', output_stride=16)
model.eval()
input = torch.rand(1, 3, 513, 513)
output = model(input)
writer.add_graph(model, input)
writer.close()
print(output.size())
``` |
{
"source": "jindeok/SwapRNN_L",
"score": 3
} |
#### File: SwapRNN_L/Swap/createdata.py
```python
import networkx as nx
import numpy as np
def graphs_to_matrix(graphs):
mat_list = []
for i in graphs:
mat = nx.to_numpy_matrix(i)
mat = np.triu(mat, k = 1)
mat = np.matrix(mat)
mat_list.append(mat.flatten())
return mat_list
def create_graphs(graphtype = "grid"):
if graphtype == "grid":
graphs = []
for i in range(4,6):
for j in range(4,6):
graphs.append(nx.grid_2d_graph(i,j))
if graphtype == "tri-grid":
graphs = []
for i in range(4,6):
for j in range(4,5):
graphs.append(nx.triangular_lattice_graph(i,j))
if graphtype == "b-a":
graphs = []
for i in range(80,81):
for j in range(2,3): # j should be lower than i ( j = # of edges , i = # of nodes )
graphs.append(nx.barabasi_albert_graph(i,j))
if graphtype == "Karate":
graphs = []
graphs.append(nx.karate_club_graph())
return graphs
```
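A short usage sketch with the two functions above in scope (the import path is assumed from the file header):
```python
from Swap.createdata import create_graphs, graphs_to_matrix  # assumed path

graphs = create_graphs(graphtype="grid")   # 2-D grids of sizes 4x4, 4x5, 5x4, 5x5
mats = graphs_to_matrix(graphs)            # flattened upper-triangular adjacency matrices
print([m.shape for m in mats])             # [(1, 256), (1, 400), (1, 400), (1, 625)]
```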
#### File: jindeok/SwapRNN_L/swapmodel.py
```python
import networkx as nx
import numpy as np
import random as rnd
from random import sample
from random import randrange
import tensorflow as tf
from Masking import GraphMasking
#import keras
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense, ZeroPadding1D
import math
from numba import jit
from numpy import arange
from functools import reduce
from data import *
from args import *
from model import *
from train import *
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
# 200624: try applying pruning to predtheta as well!
args=Args()
#Generate Randomtheta when Proxy theta training
def RandomThetaGenerator(pred):
pred = PredPruning(pred, 0.6)
numnode = int(math.sqrt(len(pred.T)))
predProxy = pred.reshape(numnode,numnode)
G = nx.from_numpy_matrix(predProxy)
I = np.ones((numnode,numnode))
randportion = rnd.uniform(0,1) # It is verified that without matching portion, it still works.
#randportion =0.1
Gprox = GraphMasking(G, method = 'random edge sampling', portion = randportion)
GproxMat = nx.to_numpy_matrix(Gprox)
randtheta = I - GproxMat
return randtheta.flatten()
def RstToBinarymat(result, numnode):
adj_result=[]
for i in result[0]:
if i <= 0:
adj_result.append(0)
elif i >= 1:
adj_result.append(1)
else:
a = np.random.binomial(n=1, p= i, size=1)
adj_result.append(a[0])
adj_result = np.array(adj_result)
adj_result = adj_result.reshape(numnode,numnode)
w = np.triu(adj_result, k = 1)
result = w + w.T
return result
def PredPruning(result, thres):
adj_result=[]
for i in result[0]:
if i <= thres:
adj_result.append(0)
elif i >= thres:
adj_result.append(1)
adj_result = np.array(adj_result)
return adj_result
def GenerateMaskedPair(X_train, X_train_copy, delportion):
Y1_train = []
Y2_train = []
for i in X_train:
Gm1 = GraphMasking(i, method= 'random edge sampling', portion = delportion)
Y1_train.append(Gm1)
for j in X_train_copy:
Gm2 = GraphMasking(j, method= 'random edge sampling', portion = delportion)
Y2_train.append(Gm2)
return Y1_train, Y2_train
def maxnode(graphset):
temp = len(reduce(lambda w1, w2: w1 if len(w1)>len(w2) else w2, graphset))
return int(np.sqrt(temp))
''''''''''''
'''model '''
''''''''''''
#Swap training model
tf.keras.backend.set_floatx('float64')
class model:
def __init__(self, a, b, r, d, lr, S, M): # S for checking max.node size
xavier=tf.keras.initializers.glorot_uniform
self.maxnumnode = maxnode(S)
print("maxnumnode for the model instance is : ", self.maxnumnode)
self.maxprev = M
self.learning_rate = lr
self.l1=tf.keras.layers.Dense(self.maxnumnode**2, kernel_initializer=xavier, activation=tf.nn.leaky_relu,input_shape=[1])
self.l2=tf.keras.layers.Dense(128,kernel_initializer=xavier,activation=tf.nn.leaky_relu)
self.l3=tf.keras.layers.Dense(128,kernel_initializer=xavier,activation=tf.nn.leaky_relu)
self.out=tf.keras.layers.Dense(self.maxnumnode**2, kernel_initializer=xavier, activation=tf.nn.sigmoid)
self.lprox1=tf.keras.layers.Dense(self.maxnumnode**2,kernel_initializer=xavier,activation=tf.nn.leaky_relu,input_shape=[1])
self.lprox2=tf.keras.layers.Dense(128,kernel_initializer=xavier,activation=tf.nn.leaky_relu)
self.lprox3=tf.keras.layers.Dense(128,kernel_initializer=xavier,activation=tf.nn.leaky_relu)
self.outprox=tf.keras.layers.Dense(self.maxnumnode**2,kernel_initializer=xavier,activation=tf.nn.sigmoid)
self.lr_schedule = keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=self.learning_rate,decay_steps=1000,decay_rate=0.9)
self.train_op = tf.keras.optimizers.Adam(learning_rate= self.lr_schedule)
#GraphRNN part layer
self.rnn = GRU_plain(input_size=M, embedding_size=args.embedding_size_rnn,
hidden_size=args.hidden_size_rnn, num_layers=args.num_layers, has_input=True,
has_output=True, output_size=args.hidden_size_rnn_output).cuda()
self.output = GRU_plain(input_size=1, embedding_size=args.embedding_size_rnn_output,
hidden_size=args.hidden_size_rnn_output, num_layers=args.num_layers, has_input=True,
has_output=True, output_size=1).cuda()
self.optimizer_rnn = optim.Adam(list(self.rnn.parameters()), lr=self.learning_rate)
self.optimizer_output = optim.Adam(list(self.output.parameters()), lr=self.learning_rate)
self.scheduler_rnn = MultiStepLR(self.optimizer_rnn, milestones=args.milestones, gamma=1)
self.scheduler_output = MultiStepLR(self.optimizer_output, milestones=args.milestones, gamma=1)
#Training hyperparameter for loss fuction
self.alpha = a
self.beta = b
self.gamma = r
self.delta = d
self.loss = []
# Running the model
def run(self,X):
boom=self.l1(X)
boom1=self.l2(boom)
boom2=self.l3(boom1)
boom3=self.out(boom2)
return boom3
def runtheta(self,Y):
boom=self.lprox1(Y)
boom1=self.lprox2(boom)
boom2=self.lprox3(boom1)
boom3=self.outprox(boom2)
return boom3
#Custom loss fucntion
def get_loss(self,Y1,Y2,numnode):
# non-proxy part of the model
input1=self.l1(Y1)
h1=self.l2(input1)
h1_2=self.l3(h1)
pred1=self.out(h1_2)
input2=self.l1(Y2)
h2=self.l2(input2)
h2_2=self.l3(h2)
pred2=self.out(h2_2)
#generate random theta
self.thetaprox1 = RandomThetaGenerator(pred1.numpy()) # a different theta at every epoch
self.thetaprox2 = RandomThetaGenerator(pred2.numpy())
# proxy part of the model
#Estimation of theta1
inputprox1=self.lprox1(Y1)
hprox1=self.lprox2(inputprox1)
hprox1_2=self.lprox3(hprox1)
predprox1=self.outprox(hprox1_2)
#Estimation of theta2
inputprox2=self.lprox1(Y2)
hprox2=self.lprox2(inputprox2)
hprox2_2=self.lprox3(hprox2)
predprox2=self.outprox(hprox2_2)
#graphRNN part
predseq1 = self.pred_to_sequence(Y1,numnode)
predseq2 = self.pred_to_sequence(Y2,numnode)
rnn_loss1 = get_rnn_loss(args, self.rnn, self.output, predseq1)
rnn_loss2 = get_rnn_loss(args, self.rnn, self.output, predseq2)
rnn_loss = rnn_loss1+rnn_loss2
pytorch_tensor = rnn_loss.cpu()
np_tensor = pytorch_tensor.detach().numpy()
rnn_loss = tf.convert_to_tensor(np_tensor, dtype= tf.float64)
#swap loss , self loss
swap_loss = tf.square(Y2 - tf.multiply(pred1,predprox2)) + tf.square(Y1 - tf.multiply(pred2, predprox1))
self_loss = tf.square(Y1 - tf.multiply(pred1,predprox1)) + tf.square(Y2 - tf.multiply(pred2, predprox2))
#prox loss
proxx_loss = tf.square(pred1 - self.run(tf.multiply(pred1,self.thetaprox1))) + tf.square(pred2 - self.run(tf.multiply(pred2,self.thetaprox2)))
proxtheta_loss = tf.square(self.thetaprox1 - predprox1) + tf.square(self.thetaprox2 - predprox2)
# entire loss
totloss_swap = tf.reduce_mean(swap_loss + self.alpha*self_loss + self.beta*proxx_loss + self.gamma*proxtheta_loss, axis=None)
totloss = totloss_swap + self.delta*rnn_loss
Lswap_torch = totloss_swap.numpy()
Lswap_torch = np.array(Lswap_torch)
Lswap_torch = torch.from_numpy(Lswap_torch)
#return totloss
return Lswap_torch, totloss
def rnn_epoch(self,Y1,Y2,numnode,swaploss):
predseq1 = self.pred_to_sequence(Y1,numnode)
predseq2 = self.pred_to_sequence(Y2,numnode)
rnn_loss1 = get_rnn_loss(args, self.rnn, self.output, predseq1)
rnn_loss2 = get_rnn_loss(args, self.rnn, self.output, predseq2)
rnn_loss = rnn_loss1+rnn_loss2
rnnpart_loss = self.delta*rnn_loss
swaploss = swaploss.type(torch.FloatTensor)
totloss = rnnpart_loss.add(swaploss)
totloss.backward()
#rnn_loss.backward()
self.rnn_apply_grad(self.optimizer_output, self.optimizer_rnn, self.scheduler_output, self.scheduler_rnn)
# get gradients
def get_grad(self,Y1,Y2,numnode):
with tf.GradientTape() as tape:
tape.watch(self.l1.variables)
tape.watch(self.l2.variables)
tape.watch(self.l3.variables)
tape.watch(self.out.variables)
Lswap, Ltot = self.get_loss(Y1,Y2,numnode)
g1 = tape.gradient(Ltot, [self.l1.variables[0],self.l1.variables[1],self.l2.variables[0],self.l2.variables[1],self.l3.variables[0],self.l3.variables[1],self.out.variables[0],self.out.variables[1]])
with tf.GradientTape() as tape2:
tape2.watch(self.lprox1.variables)
tape2.watch(self.lprox2.variables)
tape2.watch(self.lprox3.variables)
tape2.watch(self.outprox.variables)
Lswap, Ltot = self.get_loss(Y1,Y2,numnode)
g2 = tape2.gradient(Ltot, [self.lprox1.variables[0],self.lprox1.variables[1],self.lprox2.variables[0],self.lprox2.variables[1],self.lprox3.variables[0],self.lprox3.variables[1],self.outprox.variables[0],self.outprox.variables[1]])
L_append = Ltot.numpy()
self.loss.append(L_append)
return g1, g2, Lswap
# perform gradient descent
def network_learn(self,Y1_dataset,Y2_dataset,Graphs):
idx = 0
for i ,j in zip(Y1_dataset,Y2_dataset):
numnode = Graphs[idx].number_of_nodes()
g1, g2, Lswap = self.get_grad(i,j,numnode)
self.train_op.apply_gradients(zip(g2, [self.lprox1.variables[0],self.lprox1.variables[1],self.lprox2.variables[0],self.lprox2.variables[1],self.lprox3.variables[0],self.lprox3.variables[1],self.outprox.variables[0],self.outprox.variables[1]]))
self.train_op.apply_gradients(zip(g1, [self.l1.variables[0],self.l1.variables[1],self.l2.variables[0],self.l2.variables[1],self.l3.variables[0],self.l3.variables[1],self.out.variables[0],self.out.variables[1]]))
self.rnn_epoch(i,j,numnode,Lswap)
# self.rnn_apply_grad(self.optimizer_output, self.optimizer_rnn, self.scheduler_output, self.scheduler_rnn)
idx += 1
def rnn_apply_grad(self, opt_out, opt_rnn, sch_out, sch_rnn):
opt_out.step()
opt_rnn.step()
sch_out.step()
sch_rnn.step()
def pred_to_sequence(self, Y1, numnode): # TODO: how should idx be accessed from within the class?
pred_list = []
pred = self.run(Y1)
pred = PredPruning(pred, 0.6)
#pred = pred.numpy()
#pred = pred[:, : numnode**2]
pred = pred[ : numnode**2]
pred = pred.reshape(numnode,numnode)
w = np.triu(pred, k = 1)
pred = w + w.T
Gpred = nx.from_numpy_array(pred)
#Gpred = nx.grid_2d_graph(4,4)
pred_list.append(Gpred)
inputseq = Graph_sequence_sampler_pytorch(pred_list, max_prev_node= self.maxprev, max_num_node=self.maxnumnode)
seq_loader = torch.utils.data.DataLoader(inputseq, batch_size=32, num_workers=0) #sampler=sample_strategy)
return seq_loader
``` |
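An illustrative check of the pruning/stochastic-rounding helpers defined above (`RstToBinarymat`, `PredPruning`), assuming they are in scope; the toy 3-node probability vector is made up:
```python
import numpy as np

# Flattened 3x3 edge-probability matrix (arbitrary values).
pred = np.array([[0.0, 0.9, 0.2,
                  0.9, 0.0, 1.0,
                  0.2, 1.0, 0.0]])
adj = RstToBinarymat(pred, numnode=3)  # symmetric 0/1 adjacency; borderline entries are sampled
hard = PredPruning(pred, 0.6)          # plain thresholding at 0.6, returns a flat 0/1 vector
print(adj)
print(hard.reshape(3, 3))
```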
{
"source": "jinder1s/pytest-opynions",
"score": 3
} |
#### File: pytest-opynions/pytest_repo_health/__init__.py
```python
from typing import Union
__version__ = "2.2.4"
def health_metadata(parent_path: list, output_keys: dict):
"""
Make a decorator that attaches metadata to the target function.
``output_keys`` is a dictionary that documents the output keys of the checker.
Each key is a key-path into the ``all_results`` dictionary, relative to ``parent_path``.
If the key is a string, it is appended to the parent path,
otherwise it extends the parent path.
The ``parent_path``, then, is just a list of strings that is the prefix
of all the output keys that are generated by the checker.
Each output-key value is a dictionary containing documentation of that key
under the key ``'doc'``.
"""
# Build full path for each output key, based on the parent path.
expanded_output_keys = {}
for k, v in output_keys.items():
# String key equivalent to a path of just one element
key_more = [k] if isinstance(k, str) else list(k)
key_path = tuple(parent_path + key_more)
expanded_output_keys[key_path] = v
def health_metadata_decorator(func):
"""Add metadata to function documenting the output keys it generates."""
func.__dict__['pytest_repo_health'] = {
'output_keys': expanded_output_keys
}
return func
return health_metadata_decorator
def add_key_to_metadata(output_key: Union[str, tuple]):
"""
Designed for checks which only define one key
The decorator will assume the docstring for function is the docstring for key
and will add this info into func.__pytest_repo_health__
Warning: output_key has to be hashable, currently assumed to be a tuple with each level listed:
key = (first_key, second_key, final_key)
"""
# Build full path for each output key, based on the parent path.
def health_metadata_decorator(func):
"""Add metadata to function documenting the output keys it generates."""
final_output_key = tuple([output_key]) if isinstance(output_key, str) else output_key
func.__dict__['pytest_repo_health'] = {
'output_keys': {final_output_key:func.__doc__.strip()}
}
return func
return health_metadata_decorator
```
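A hedged usage sketch of the two decorators above on a hypothetical repo-health check module (the check names, keys, and the way `all_results` is filled in are illustrative, not taken from a real check):
```python
from pytest_repo_health import health_metadata, add_key_to_metadata

MODULE_DICT_KEY = "readme"

@health_metadata(
    [MODULE_DICT_KEY],
    {
        "exists": "whether the repository has a README file",
        ("badges", "count"): "number of badges found in the README",
    },
)
def check_readme(all_results):
    all_results[MODULE_DICT_KEY]["exists"] = True
    all_results[MODULE_DICT_KEY]["badges"] = {"count": 0}

@add_key_to_metadata((MODULE_DICT_KEY, "title"))
def check_readme_title(all_results):
    """The first-level heading of the README, if any."""
    all_results[MODULE_DICT_KEY]["title"] = "example"
```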
#### File: pytest-opynions/tests/__init__.py
```python
from pathlib import Path
PYTEST_INI = """
[pytest]
addopts = --repo-health --repo-health-path {checks_path} --repo-path {repo_path} --repo-health-metadata {metadata_path}
"""
def run_checks(testdir, repo_path=None, metadata_path=None, **kwargs):
"""
Put the check file content for each provided kwarg key into check files under the
specified test directory, and then run them. Runs the checks against the root of
this repository by default, specify repo_path to run against a different directory.
Returns the pytester RunResult so the results can be examined.
"""
# Must put checks in a "repo_health" subdirectory to be collected
testdir.mkpydir("repo_health")
checks_path = Path(str(testdir.tmpdir)) / "repo_health"
# The testdir convenience methods for file creation only work in the base directory
for path, content in kwargs.items():
file_path = checks_path / f"check_{path}.py"
with open(str(file_path), "w") as f:
f.write(content)
if repo_path is None:
repo_path = Path(__file__).parent / ".."
if metadata_path is None:
metadata_path = ""
# Tell pytest where to find the checks and to run them on the real repository root
testdir.makefile(".ini", pytest=PYTEST_INI.format(checks_path=str(checks_path),
repo_path=str(repo_path),
metadata_path=metadata_path))
return testdir.runpytest()
```
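An illustrative test built on the helper above; the check body and the exit-code assertion are assumptions, not taken from the real test suite:
```python
def test_one_trivial_check(testdir):
    result = run_checks(
        testdir,
        trivial="def check_trivial(all_results):\n    all_results['example'] = True\n",
    )
    assert result.ret == 0  # the single generated check is expected to pass
```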
#### File: pytest-opynions/tests/test_utils.py
```python
import git
from pytest_repo_health.utils import get_repo_remote_name
def test_get_repo_remote_name_on_non_git_dir(tmpdir):
"""
Verify that the non git directory returns None on getting origin URL through get_repo_remote_name
"""
repo_dir = tmpdir / "target-repo"
response = get_repo_remote_name(repo_dir)
assert response is None
def test_get_repo_remote_name_without_origin_set(tmpdir):
"""
Verify that the git repository without remote "origin" set returns None
on getting origin URL through get_repo_remote_name
"""
repo_dir = tmpdir / "target-repo"
repo = git.Repo.init(repo_dir)
response = get_repo_remote_name(repo_dir)
assert response is None
def test_get_repo_remote_name_with_http_origin(tmpdir):
"""
Verify that the origin URL is retrieved through get_repo_remote_name on valid git repository with origin set
"""
url = "https://github.com/edx/pytest-repo-health.git"
repo_dir = tmpdir / "target-repo"
repo = git.Repo.init(repo_dir)
repo.create_remote("origin", url=url)
response = get_repo_remote_name(repo_dir)
assert response == 'pytest-repo-health'
def test_get_repo_remote_name_with_http_origin_without_git(tmpdir):
"""
Verify that the origin URL is retrieved through get_repo_remote_name on valid git repository with origin set
"""
url = "https://github.com/edx/pytest-repo-health"
repo_dir = tmpdir / "target-repo"
repo = git.Repo.init(repo_dir)
repo.create_remote("origin", url=url)
response = get_repo_remote_name(repo_dir)
assert response == 'pytest-repo-health'
def test_get_repo_remote_name_with_ssh_origin(tmpdir):
"""
Verify that the origin URL is retrieved through get_repo_remote_name on valid git repository with origin set
"""
url = "<EMAIL>:edx/pytest-repo-health.git"
repo_dir = tmpdir / "target-repo"
repo = git.Repo.init(repo_dir)
repo.create_remote("origin", url=url)
response = get_repo_remote_name(repo_dir)
assert response == 'pytest-repo-health'
def test_get_repo_remote_name_with_ssh_origin_without_git(tmpdir):
"""
Verify that the origin URL is retrieved through get_repo_remote_name on valid git repository with origin set
"""
url = "<EMAIL>:edx/pytest-repo-health"
repo_dir = tmpdir / "target-repo"
repo = git.Repo.init(repo_dir)
repo.create_remote("origin", url=url)
response = get_repo_remote_name(repo_dir)
assert response == 'pytest-repo-health'
def test_get_repo_remote_name_with_dot_in_name(tmpdir):
"""
Regression test for edx/.github repo.
"""
url = "https://github.com/edx/.github.git"
repo_dir = tmpdir / "target-repo"
repo = git.Repo.init(repo_dir)
repo.create_remote("origin", url=url)
response = get_repo_remote_name(repo_dir)
assert response == '.github'
``` |
{
"source": "JindeShubhamA/Image_captioning",
"score": 3
} |
#### File: Image_captioning/Attention/modules.py
```python
import os, sys
sys.path.append(os.path.dirname(os.getcwd()))
# Imports
import tensorflow as tf
import tensorflow_probability  # needed by HardAttention below
class SoftAttention(tf.keras.Model):
def __init__(self, units):
"""
units: number of internal units per layer
"""
super(SoftAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, features, hidden):
"""
features: features observed from image
hidden: hidden state of the decoder network (RNN) from previous iteration
"""
# print("INSIDE ATTENTION MODULE")
# print("Features are in Attention",features.shape)
# print("hidden ",hidden.shape)
hidden_with_time_axis = tf.expand_dims(hidden, 1)
# print("Hidden with time axis ", hidden_with_time_axis.shape)
# score shape == (batch_size, 64, hidden_size)
features_W1 = self.W1(features)
# print("Features W1",features_W1.shape)
hidden_with_time_axis_W2 = self.W2(hidden_with_time_axis)
# print("Hidden with time axis W2",hidden_with_time_axis_W2.shape)
sum_check = features_W1+hidden_with_time_axis_W2
# print("Sum check",sum_check.shape)
score= tf.nn.tanh(sum_check)
#score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))
# print("Score is ",score.shape)
# attention_weights shape == (batch_size, 64, 1)
# you get 1 at the last axis because you are applying score to self.V
attention_weights = tf.nn.softmax(self.V(score), axis=1)
# print("Attention weights ",attention_weights.shape)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * features
# print("Context vector",context_vector.shape)
context_vector = tf.reduce_sum(context_vector, axis=1)
# print("Context Vector after reduce sum",context_vector.shape)
return context_vector, attention_weights
class HardAttention(tf.keras.Model):
# TODO 1: Define custom loss function?
# TODO 2: Include running average b_k
# TODO 3: Add entropy H[s]
def __init__(self, units):
"""
units: number of internal units per layer
"""
super(HardAttention, self).__init__()
self.feature_weights = tf.keras.layers.Dense(units)
self.hidden_weights = tf.keras.layers.Dense(units)
self.attention_weights = tf.keras.layers.Dense(1)
def call(self, features, hidden):
"""
features: features observed from image, output of encoder, shape: (batch_size, num_features, embedding_dim)
hidden: hidden state of the decoder network (RNN) from previous iteration, shape: (batch_size, hidden_size)
"""
# hidden_expanded, shape: (batch_size, 1, hidden_size)
hidden_expanded = tf.expand_dims(hidden, 1)
# Calculate unnormalized Attention weights;
# unnormal_attent_weights, shape: (batch_size, num_features, hidden_size)
unnormal_attent_weights = tf.nn.tanh(self.feature_weights(features) + self.hidden_weights(hidden_expanded))
# Normalize Attention weights to turn them into a probability-distribution;
# attention_weights_alpha, shape: (batch_size, num_features, 1)
attention_weights_alpha = tf.nn.softmax(self.attention_weights(unnormal_attent_weights), axis=1)
# Select index of feature to attend, i.e. Attention location
# attention_location_s, shape = scalar = ();
if tf.squeeze(tf.argmax(tensorflow_probability.distributions.Multinomial(total_count=1., probs=[0.5, 0.5]).sample())) == 0:
# With 50% chance, set the sampled Attention location s to its expected value alpha
attention_location_s = tf.squeeze(tf.argmax(attention_weights_alpha, axis=-1))
else:
# Select feature based on stochastic sampling from Multinoulli (categorical) distribution with probabilities attention_weights_alpha
one_hot_selection = tensorflow_probability.distributions.Multinomial(total_count=1., probs=attention_weights_alpha).sample()
attention_location_s = tf.squeeze(tf.argmax(one_hot_selection, axis=-1))
# Construct context vector by selecting stochastically chosen feature to pay Attention to;
# context_vector_z, shape after selection of feature: (batch_size, embedding_dim)
context_vector_z = features[attention_location_s,:]
return context_vector_z, attention_weights_alpha
``` |
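A small shape-check sketch for the soft-Attention module above; the batch size, feature count, and dimensions are arbitrary:
```python
import tensorflow as tf

attention = SoftAttention(units=512)
features = tf.random.uniform((16, 64, 256))  # (batch, num_features, embedding_dim)
hidden = tf.random.uniform((16, 512))        # (batch, hidden_size)
context, weights = attention(features, hidden)
print(context.shape, weights.shape)          # (16, 256) (16, 64, 1)
```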
{
"source": "jindl465/DSFD-Pytorch-Inference",
"score": 2
} |
#### File: face_detection/retinaface/detect.py
```python
import torch
import numpy as np
from .. import torch_utils
import typing
from .models.retinaface import RetinaFace
from ..box_utils import batched_decode
from .utils import decode_landm
from .config import cfg_mnet, cfg_re50
from .prior_box import PriorBox
from torch.hub import load_state_dict_from_url
from torchvision.ops import nms
from ..base import Detector
from ..build import DETECTOR_REGISTRY
class RetinaNetDetector(Detector):
def __init__(
self,
model: str,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if model == "mobilenet":
cfg = cfg_mnet
state_dict = load_state_dict_from_url(
"https://folk.ntnu.no/haakohu/RetinaFace_mobilenet025.pth",
map_location=torch_utils.get_device()
)
else:
assert model == "resnet50"
cfg = cfg_re50
state_dict = load_state_dict_from_url(
"https://folk.ntnu.no/haakohu/RetinaFace_ResNet50.pth",
map_location=torch_utils.get_device()
)
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
net = RetinaFace(cfg=cfg)
net.eval()
net.load_state_dict(state_dict)
self.cfg = cfg
self.net = net.to(self.device)
self.mean = np.array([104, 117, 123], dtype=np.float32)
def batched_detect_with_landmarks(
self, image: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:
"""Takes N images and performs and returns a set of bounding boxes as
detections
Args:
image (np.ndarray): shape [N, height, width, 3]
Returns:
np.ndarray: shape [N, 5] with (xmin, ymin, xmax, ymax, score)
np.ndarray: shape [N, 5, 2] with 5 landmarks with (x, y)
"""
image = image.astype(np.float32) - self.mean
image = np.moveaxis(image, -1, 1)
image = torch.from_numpy(image).to(self.device)
height, width = image.shape[2:]
boxes, landms = self._detect(image, return_landmarks=True)
scores = boxes[:, :, -1]
boxes = boxes[:, :, :-1]
final_output_box = []
final_output_landmarks = []
for i in range(len(boxes)):
boxes_ = boxes[i]
landms_ = landms[i]
scores_ = scores[i]
# Confidence thresholding
keep_idx = scores_ >= self.confidence_threshold
boxes_ = boxes_[keep_idx]
scores_ = scores_[keep_idx]
landms_ = landms_[keep_idx]
# Non maxima suppression
keep_idx = nms(
boxes_, scores_, self.nms_iou_threshold)
boxes_ = boxes_[keep_idx]
scores_ = scores_[keep_idx]
landms_ = landms_[keep_idx]
# Scale boxes
boxes_[:, [0, 2]] *= width
boxes_[:, [1, 3]] *= height
# Scale landmarks
landms_ = landms_.cpu().numpy().reshape(-1, 5, 2)
landms_[:, :, 0] *= width
landms_[:, :, 1] *= height
dets = torch.cat(
(boxes_, scores_.view(-1, 1)), dim=1).cpu().numpy()
final_output_box.append(dets)
final_output_landmarks.append(landms_)
return final_output_box, final_output_landmarks
@torch.no_grad()
def _detect(
self, image: np.ndarray,
return_landmarks=False) -> np.ndarray:
"""Batched detect
Args:
image (np.ndarray): shape [N, H, W, 3]
Returns:
boxes: list of length N with shape [num_boxes, 5] per element
"""
image = image[:, [2, 1, 0]]
loc, conf, landms = self.net(image) # forward pass
scores = conf[:, :, 1:]
height, width = image.shape[2:]
priorbox = PriorBox(
self.cfg, image_size=(height, width))
priors = priorbox.forward()
priors = torch_utils.to_cuda(priors, self.device)
prior_data = priors.data
boxes = batched_decode(loc, prior_data, self.cfg['variance'])
boxes = torch.cat((boxes, scores), dim=-1)
if return_landmarks:
landms = decode_landm(landms, prior_data, self.cfg['variance'])
return boxes, landms
return boxes
@DETECTOR_REGISTRY.register_module
class RetinaNetResNet50(RetinaNetDetector):
def __init__(self, *args, **kwargs):
super().__init__("resnet50", *args, **kwargs)
@DETECTOR_REGISTRY.register_module
class RetinaNetMobileNetV1(RetinaNetDetector):
def __init__(self, *args, **kwargs):
super().__init__("mobilenet", *args, **kwargs)
``` |
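The post-processing in `batched_detect_with_landmarks` scales the normalised boxes back to pixel coordinates and reshapes the ten landmark values per face into five (x, y) pairs. A small self-contained sketch of those two steps, using synthetic tensors rather than real detector output:

```python
import torch

width, height = 640, 480
boxes = torch.tensor([[0.25, 0.20, 0.55, 0.70]])  # (num_faces, 4), normalised xyxy
landms = torch.rand(1, 10)                        # (num_faces, 10), normalised

boxes[:, [0, 2]] *= width   # scale x coordinates to pixels
boxes[:, [1, 3]] *= height  # scale y coordinates to pixels
landms = landms.numpy().reshape(-1, 5, 2)
landms[:, :, 0] *= width
landms[:, :, 1] *= height
print(boxes)         # pixel-space box
print(landms.shape)  # (1, 5, 2)
```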
{
"source": "JindongGu/SimDis",
"score": 2
} |
#### File: SimDis/utils/__init__.py
```python
import os
import math
import torch
import shutil
def adjust_learning_rate_iter(optimizer, iters, args, ITERS_PER_EPOCH=5004):
"""Decay the learning rate based on schedule"""
total_iters = ITERS_PER_EPOCH * args.total_epochs
lr = args.lr
if args.scheduler == "cos": # cosine lr schedule
lr *= 0.5 * (1.0 + math.cos(math.pi * iters / total_iters))
elif args.scheduler == "warmcos":
warmup_total_iters = ITERS_PER_EPOCH * args.warmup_epochs
if iters <= warmup_total_iters:
lr_start = 1e-6
lr = (lr - lr_start) * iters / float(warmup_total_iters) + lr_start
else:
lr *= 0.5 * (1.0 + math.cos(math.pi * (iters - warmup_total_iters) / (total_iters - warmup_total_iters)))
elif args.scheduler == "multistep": # stepwise lr schedule
milestones = [int(total_iters * milestone / args.total_epochs) for milestone in [90, 120]]
for milestone in milestones:
lr *= 0.2 if iters >= milestone else 1.0
elif args.scheduler == "constant": # lr schedule
return lr
else:
raise ValueError("Scheduler version {} not supported.".format(args.scheduler))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
return lr
def save_checkpoint(state, is_best, save, model_name="", linear=False):
if linear: model_name += '_linear'
if not os.path.exists(save):
os.makedirs(save)
filename = os.path.join(save, model_name + "_ckpt.pth.tar")
torch.save(state, filename)
if is_best:
best_filename = os.path.join(save, model_name + "_best_ckpt.pth.tar")
shutil.copyfile(filename, best_filename)
def load_checkpoint(args, file_name, model, optimizer, linear=False, tea=False):
start_epoch = 1
save_file = os.path.join(file_name, 'last_epoch_ckpt.pth.tar')
if not linear and os.path.isfile(save_file):
state = torch.load(save_file, map_location='cpu')
start_epoch = state['start_epoch'] + 1
model.load_state_dict(state['model'])
optimizer.load_state_dict(state['optimizer'])
del state
if args.rank == 0: print("=> loaded successfully, training starts from (epoch {})".format(start_epoch))
elif linear:
save_file_linear = os.path.join(file_name, 'last_epoch_linear_ckpt.pth.tar')
if os.path.isfile(save_file_linear):
state = torch.load(save_file_linear, map_location='cpu')
start_epoch = state['start_epoch'] + 1
model.load_state_dict(state['model'])
optimizer.load_state_dict(state['optimizer'])
del state
if args.rank == 0: print("=> loaded successfully, training starts from (epoch {})".format(start_epoch))
elif os.path.isfile(save_file):
state = torch.load(save_file, map_location='cpu')
new_state = {}
for k, v in state['model'].items():
if (not tea) and ('student' in k) and ('student_ema' not in k):
new_state[k.replace("student", "encoder")] = v
elif tea and ('teacher' in k) and ('teacher_ema' not in k):
new_state[k.replace("teacher", "encoder")] = v
model.load_state_dict(new_state, strict=False)
del state
if args.rank == 0: print("=> loaded successfully, training starts from (epoch {})".format(start_epoch))
else:
if args.rank == 0: print("=> no checkpoint found from ", save_file)
else:
if args.rank == 0: print("=> no checkpoint found from ", save_file)
return start_epoch, model, optimizer
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def parse_devices(gpu_ids):
if "-" in gpu_ids:
gpus = gpu_ids.split("-")
gpus[0] = int(gpus[0])
gpus[1] = int(gpus[1]) + 1
parsed_ids = ",".join(map(lambda x: str(x), list(range(*gpus))))
return parsed_ids
else:
return gpu_ids
class AvgMeter(object):
def __init__(self):
self.avg = 0
self.sum = 0
self.cnt = 0
self.reset()
self.val = 0
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
``` |
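The `cos` branch of `adjust_learning_rate_iter` is the usual half-cosine decay, lr = 0.5 * base_lr * (1 + cos(pi * iter / total_iters)). A standalone sketch with assumed values (the base learning rate and epoch count below are made up) shows how it decays from the base rate towards zero:

```python
import math

base_lr, iters_per_epoch, total_epochs = 0.1, 5004, 100  # assumed values
total_iters = iters_per_epoch * total_epochs
for it in (0, total_iters // 2, total_iters - 1):
    lr = base_lr * 0.5 * (1.0 + math.cos(math.pi * it / total_iters))
    print(f"iter {it:>6}: lr = {lr:.6f}")  # 0.100000, 0.050000, ~0.000000
```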
{
"source": "JindongJiang/DeepPose_PyTorch",
"score": 2
} |
#### File: JindongJiang/DeepPose_PyTorch/humanpose_eval.py
```python
import argparse
import os
import numpy as np
import csv
from torch.utils.data import DataLoader
import torch.optim
import torchvision.transforms as transforms
from torch.autograd import Variable
import humanpose_net
import humanpose_data
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch human pose evaluation')
parser.add_argument('--lsp-root', metavar='DIR',
help='path to root of lsp dataset')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=100, type=int,
metavar='N', help='mini-batch size (default: 100)')
parser.add_argument('--ckpt-dir', default='', type=str, metavar='PATH',
help='path to checkpoint')
parser.add_argument('--num-eval', default=1, type=int, metavar='N',
help='How many models to be evaluate')
parser.add_argument('--no-other-print', action='store_true', default=False,
help='disables printing other than pure losses')
parser.add_argument('--training-eval', action='store_true', default=False,
help='Evaluate for training data')
parser.add_argument('--testing-eval', action='store_true', default=False,
help='Evaluate for testing data')
parser.add_argument('--all', action='store_true', default=False,
help='Evaluate all models')
parser.add_argument('--eval-log', action='store_true', default=False,
help='Generate log file')
parser.add_argument('--pred-log', action='store_true', default=False,
help='Write down every output as well as groundtruth')
args = parser.parse_args()
args.cuda = (args.cuda and torch.cuda.is_available())
assert args.training_eval or args.testing_eval, 'Must specify one or both of --training-eval and --testing-eval'
image_w = 224
image_h = 224
def eval():
if args.cuda:
torch.backends.cudnn.benchmark = True
model_names = np.array([s for s in os.listdir(args.ckpt_dir) if
s.startswith('ckpt_epoch_')])
model_nums = np.array([float(s.rsplit('_')[2]) for s in model_names])
sorted_models = model_names[np.argsort(model_nums)]
sorted_nums = model_nums[np.argsort(model_nums)]
if args.all:
args.num_eval = len(sorted_models)
model = humanpose_net.HumanPoseNet(28, vgg_pretrained=False)
model.eval()
if args.cuda:
model.cuda()
if args.testing_eval:
test_data = humanpose_data.LSPDataset(args.lsp_root,
transform=transforms.Compose([humanpose_data.Scale(image_h, image_w),
humanpose_data.ToTensor(),
humanpose_data.Normalize()]),
phase_train=False,
weighted_loss=False)
test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True if args.cuda else False)
if args.training_eval:
train_data = humanpose_data.LSPDataset(args.lsp_root,
transform=transforms.Compose([humanpose_data.Scale(image_h, image_w),
humanpose_data.ToTensor(),
humanpose_data.Normalize()]),
phase_train=True,
weighted_loss=False)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True if args.cuda else False)
for model_epoch, model_file in zip(sorted_nums[-args.num_eval:], sorted_models[-args.num_eval:]):
load_ckpt(model, os.path.join(args.ckpt_dir, model_file),
args.no_other_print)
if args.testing_eval:
evaluation(model, test_loader, model_epoch, args.ckpt_dir, phase_cuda=args.cuda,
testing_data=True, phase_eval_log=args.eval_log,
phase_pred_log=args.pred_log)
if args.training_eval:
evaluation(model, train_loader, model_epoch, args.ckpt_dir, phase_cuda=args.cuda,
testing_data=False, phase_eval_log=args.eval_log,
phase_pred_log=args.pred_log)
def load_ckpt(model, model_file, no_other_print=False):
try:
if not no_other_print:
print("=> loading checkpoint '{}'".format(model_file))
if args.cuda:
checkpoint = torch.load(model_file)
else:
checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['state_dict'])
if not no_other_print:
print("=> loaded checkpoint '{}' (epoch {})"
.format(model_file, checkpoint['epoch']))
except FileNotFoundError:
print("=> no checkpoint found at '{}'".format(model_file))
os._exit(0)
def evaluation(model, data_loader, model_epoch, ckpt_dir, phase_cuda, writer=None,
testing_data=True, phase_eval_log=False, phase_pred_log=False):
phase_data = 'testing' if testing_data else 'training'
total_loss = 0
num_allpoints = 0
for batch_idx, sample in enumerate(data_loader):
image = Variable(sample['image'].cuda() if phase_cuda else sample['image'], volatile=True)
lm = Variable(sample['landmarks'].cuda() if phase_cuda else sample['landmarks'], volatile=True)
weight = Variable(sample['weight'].cuda() if phase_cuda else sample['weight'], volatile=True)
imagefile = sample['imagefile']
num_allpoints += (weight.clone().cpu().data.numpy() != 0).sum()
pred = model(image)
loss = humanpose_net.mse_loss(pred, lm, weight, weighted_loss=False,
size_average=False)
total_loss += loss.data[0]
if phase_pred_log:
with open(os.path.join(ckpt_dir,
'logfile/cp_{:0.2f}_{}_pred.csv'.format(model_epoch, phase_data)),
mode='a') as f:
wtr = csv.writer(f)
wtr.writerows(np.hstack([np.array(imagefile)[:, np.newaxis],
pred.clone().cpu().data.numpy(),
lm.clone().cpu().data.numpy(),
weight.clone().cpu().data.numpy()]))
loss = total_loss / num_allpoints
if phase_eval_log:
with open(os.path.join(ckpt_dir, 'logfile/{}_eval_log.csv'.format(phase_data)), mode='a') as f:
f.write('{:0.2f},{}\n'.format(model_epoch, loss))
if writer is not None:
writer.add_scalar('Testing data evaluation loss', loss, global_step=int(model_epoch))
print('epochs {:0.2f} {} loss {:1.5f}'.format(model_epoch, phase_data, loss))
if __name__ == '__main__':
if not os.path.exists(os.path.join(args.ckpt_dir, 'logfile')):
os.mkdir(os.path.join(args.ckpt_dir, 'logfile'))
eval()
``` |
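In `evaluation`, the summed MSE is divided by `num_allpoints`, the count of labelled keypoints (non-zero weights), so images with missing annotations do not skew the average. A tiny numeric illustration with made-up errors and weights:

```python
import numpy as np

weights = np.array([[1, 1, 0, 1],
                    [0, 1, 1, 1]], dtype=float)    # 0 marks an unlabelled point
squared_errors = np.array([[0.1, 0.4, 9.9, 0.2],
                           [9.9, 0.3, 0.1, 0.5]])  # errors at masked points are ignored
total_loss = float((squared_errors * (weights != 0)).sum())
num_allpoints = int((weights != 0).sum())
print(total_loss / num_allpoints)  # 1.6 / 6 = 0.2667
```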
{
"source": "jindongyang94/anti-spoofing",
"score": 2
} |
#### File: src/data/make_dataset_aws.py
```python
import os
from pprint import pformat, pprint
import boto3
import fire
from tqdm import tqdm, trange
from modules.aws_helper import S3Helper
from modules.config import (DATALAKE_NAME, PROFILEIMG_FOLDER,
EXTERNAL_DATA_DIR, logger)
"""
The check in / check out pictures are placed in:
Company --> hubble/attendances/attendance/(check_in_photo or check_out_photo)/(numeric id)/(check_in or check_out_photo.jpg)
or (thumb_check_in or out_photo.jpg
For now, we can ignore the thumbnails as we want to have higher resolution pictures.
The thumbnails will be useful if we want to train the model with lower resolution pictures to increase robustness.
"""
## ------------------------------------------------------------------------------------------------------------------------------------------------------------- ##
## AWS Download
## ------------------------------------------------------------------------------------------------------------------------------------------------------------- ##
def migrate_pictures():
"""
This function is the main function to migrate all buckets that have check in and check out photos to a centralized location denoted
in the helper script: PROFILEIMG_FOLDER
"""
s3 = S3Helper()
buckets_dict = list_buckets()
# Print Bucket List
for line in pformat(buckets_dict).split('\n'):
logger.info(line)
for key, values in buckets_dict.items():
logger.info('Migrating for %s pictures' % key)
bar = tqdm(values, dynamic_ncols=True, desc='Bar desc', leave=True)
for keypaths in bar:
keypath = list(filter(lambda x: str(x.split('/')[-1]) == key, keypaths))
if keypath and len(keypath) == 1:
bucketname = keypath[0]
else:
logger.error("Keypath Filter is wrong. Keypath: %s" % keypath)
break
oldkeypaths = s3.list_objects(bucketname, keypath)
# logger.info(oldkeypaths)
for oldkeypath in oldkeypaths:
# Remove all thumb photos
if 'thumb' in str(oldkeypath.split('/')[-1]):
continue
full_oldkeypath = bucketname + '/' + oldkeypath
bar.set_description(full_oldkeypath)
# logger.info(full_oldkeypath)
newimagename = bucketname + '_' + oldkeypath.split('/')[-2] + '_' + key + '.' + oldkeypath.split('.')[-1]
full_newkeypath = DATALAKE_NAME + '/' + PROFILEIMG_FOLDER + '/' + newimagename
success = s3.move_file(full_oldkeypath, full_newkeypath)
if not success:
logger.info("Unsuccessful Transfer")
bar.refresh()
return
def list_buckets():
"""
Simply a wrapper function to run it specifically for this case.
"""
check_in_key = "hubble/attendances/attendance/check_in_photo"
check_out_key = "hubble/attendances/attendance/check_out_photo"
paths = [check_in_key, check_out_key]
return __list_buckets(paths)
def download_images(folder, start_index, limit):
"""
Simply a wrapper function to call what is already defined.
"""
local_folderpath = os.path.join(EXTERNAL_DATA_DIR, folder)
logger.info(__download_images(DATALAKE_NAME, PROFILEIMG_FOLDER, local_folderpath, start_index=start_index, limit=limit))
## ------------------------------------------------------------------------------------------------------------------------------------------------------
## Sub Functions
## ------------------------------------------------------------------------------------------------------------------------------------------------------
def __list_buckets(paths):
"""
This function checks every bucket for the presence of a check_in / check_out folder
so that we can use it to migrate the folders.
"""
keys = list(map(lambda x: str(x.split('/')[-1]), paths))
logger.info(keys)
s3 = S3Helper()
# List all the buckets in the system
buckets = s3.client.list_buckets()['Buckets']
bucketnames = list(map(lambda x: x['Name'], buckets))
bucket_dict = {x : [] for x in keys}
logger.info("Empty Dict: %s" % bucket_dict)
bar = tqdm(bucketnames, dynamic_ncols=True, desc='Bar desc', leave=True)
for bucketname in bar:
bar.set_description('Accessing %s' % bucketname)
for keypath in paths:
if s3.check_folder_in_bucket(bucketname, keypath):
# logger.info('%s has Check In Photos.' % bucketname)
key = str(keypath.split('/')[-1])
bucket_dict[key].append(bucketname)
bar.refresh()
return bucket_dict
def __download_images(bucketname, folderpath, location, start_index = 0, limit=100):
"""
This function downloads `limit` images starting at `start_index` (random sampling could be added later)
from the bucket to this repo under 'external/'
"""
s3 = S3Helper()
keypaths = s3.list_objects(bucketname, folderpath)
logger.info('Starting Downloading Images...')
index = start_index
bar = trange(limit, dynamic_ncols=True, desc='Bar desc', leave=True)
for i in bar:
keypath = keypaths[index]
local_imagepath = location + '/' + str(keypath.split('/')[-1])
bar.set_description(keypath)
s3.download(keypath, bucketname, local_imagepath)
bar.refresh()
index += 1
files = []
for r, d, f in os.walk(location):  # count the images downloaded locally
for file in f:
if '.jpg' in file:
files.append(os.path.join(r, file))
statement = "There are %s images in %s" % (len(files), location)
return statement
if __name__ == "__main__":
# Fire is a library for automatically generating command line interfaces (CLIs) from absolutely any Python object
fire.Fire()
```
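The renaming scheme in `migrate_pictures` flattens the per-company attendance paths into a single profile-pictures prefix. The string manipulation can be checked in isolation; the bucket name and record id below are example values, while the datalake and folder names come from `modules.config`:

```python
bucketname = "companya"   # example bucket
key = "check_in_photo"
oldkeypath = "hubble/attendances/attendance/check_in_photo/42/check_in_photo.jpg"
datalake, profile_folder = "hubble-datalake1", "images/profile_pics"

newimagename = (bucketname + "_" + oldkeypath.split("/")[-2] + "_" + key
                + "." + oldkeypath.split(".")[-1])
full_newkeypath = datalake + "/" + profile_folder + "/" + newimagename
print(full_newkeypath)
# hubble-datalake1/images/profile_pics/companya_42_check_in_photo.jpg
```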
#### File: src/data/make_dataset_video.py
```python
import argparse
import os
from glob import glob
import cv2
import numpy as np
from modules.config import (EXTERNAL_DATA_DIR, logger, MODELS_DIR,
PROCESSED_DATA_DIR, WORKING_DIR, DETECTORS_DIR, find_model)
## ------------------------------------------------------------------------------------------------------------------------------------------------------------- ##
# Video Processing
## ------------------------------------------------------------------------------------------------------------------------------------------------------------- ##
def bulk_processing(args):
"""
Bulk Process all Images in the folder
"""
# load our serialized face detector from disk
print("[INFO] loading face detector...")
face_detector_path = os.path.join(DETECTORS_DIR, args['detector'])
protoPath = find_model(face_detector_path, 'prototxt')
modelPath = find_model(face_detector_path, "caffemodel")
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# Split the videos into two batches: real and fake
video_dir = os.path.join(EXTERNAL_DATA_DIR, args['input'])
video_sub_folders = os.path.sep.join([video_dir, '*/'])
video_sub_folders = glob(video_sub_folders)
for sub_folder in video_sub_folders:
# Detect video type and dataset path
videotype = str(sub_folder.split('/')[-2])
images_location = os.path.sep.join([PROCESSED_DATA_DIR, args['input'], videotype])
# Iterate through all videos in each folder
videos = glob(os.path.sep.join([sub_folder, '*.mov']))
videos.extend(glob(os.path.sep.join([sub_folder, '*.mp4'])))
# number of frames saved thus far
saved = 0
# open up existing images in the current folder and append to it instead of overwriting it
images = glob(os.path.sep.join([images_location, "*.png"]))
images.extend(glob(os.path.sep.join([images_location, '*.jpg'])))
if args['reset']:
for im in images:
os.remove(im)
else:
saved = len(images)
for video in videos:
# open a pointer to the video file stream and initialize the total
# number of frames read thus far for skipping
vs = cv2.VideoCapture(video)
read = 0
# loop over frames from the video file stream
while True:
# grab the frame from the file
(grabbed, frame) = vs.read()
# if the frame was not grabbed, then we have reached the end of the stream
if not grabbed:
break
# increment the total number of frames read thus far
read += 1
# check to see if we should process this frame
if read % args["skip"] != 0:
continue
# grab the frame dimensions and construct a blob from the frame
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
# pass the blob through the network and obtain the detections and predictions
net.setInput(blob)
detections = net.forward()
# ensure at least one face was found
if len(detections) > 0:
# we're making the assumption that each image has only ONE
# face, so find the bounding box with the largest probability
i = np.argmax(detections[0, 0, :, 2])
confidence = detections[0, 0, i, 2]
# ensure that the detection with the largest probability also
# meets our minimum probability test (thus helping filter out
# weak detections)
if confidence > args["confidence"]:
# compute the (x, y)-coordinates of the bounding box for
# the face and extract the face ROI
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
face = frame[startY:endY, startX:endX]
# write the frame to disk
p = os.path.sep.join(
[images_location, "{}.png".format(saved)])
cv2.imwrite(p, face)
saved += 1
print("[INFO] saved {} to disk".format(p))
# do a bit of cleanup
vs.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type=str, required=True,
help="path to input folder to all the videos")
ap.add_argument("-r", "--reset", type=int, default=0,
help="Option to delete all given images in the ")
ap.add_argument("-d", "--detector", type=str, required=True,
help="path to OpenCV's deep learning face detector")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip", type=int, default=16,
help="# of frames to skip before applying face detection")
args = vars(ap.parse_args())
bulk_processing(args)
```
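`bulk_processing` keeps only the single most confident detection per frame by taking `np.argmax` over the confidence column of the detector output. A synthetic example of that selection:

```python
import numpy as np

detections = np.zeros((1, 1, 3, 7))          # fake detector output
detections[0, 0, :, 2] = [0.31, 0.88, 0.12]  # per-detection confidences
i = np.argmax(detections[0, 0, :, 2])
print(i, detections[0, 0, i, 2])             # 1 0.88
```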
#### File: src/models/predict.py
```python
import os
import pickle
import shutil
import time
from glob import glob
from os.path import abspath, dirname, join
import cv2
import fire
import imutils
import numpy as np
from imutils.video import VideoStream
from keras.models import load_model
from keras.preprocessing.image import img_to_array
from tqdm import tqdm
from modules.aws_helper import S3Helper
from modules.config import (DATALAKE_NAME, DETECTORS_DIR, EXTERNAL_DATA_DIR,
INTERIM_DATA_DIR, LABELS_DIR, NN_MODELS_DIR,
PROFILEIMG_FOLDER, WORKING_DIR, find_model, logger)
from modules.nn_predict_helper import (label_with_face_detector_original,
label_with_face_detector_ultra)
def video_demo(model, le, detector, confidence=0.9):
"""
Provide a live video demo to check whether the model works.
"""
args = {
'model': model,
'detector': detector,
'le': le,
'confidence': confidence
}
# load our serialized face detector from disk
print("[INFO] loading face detector...")
face_detector_path = os.path.join(DETECTORS_DIR, args['detector'])
protoPath = find_model(face_detector_path, 'prototxt')
modelPath = find_model(face_detector_path, "caffemodel")
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# Load the liveness detector model and label encoder from disk
print("[INFO] loading liveness detector...")
classifiermodelpath = os.path.join(
NN_MODELS_DIR, args['model'])
model = load_model(classifiermodelpath)
le = pickle.loads(
open(os.path.join(LABELS_DIR, args["le"]), "rb").read())
# initialize the video stream and allow the camera sensor to warmup
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 600 pixels
frame = vs.read()
if args['detector'] == 'face_RFB':
frame, _, _ = label_with_face_detector_ultra(frame, net, model, le, args['confidence'])
else:
frame, _, _ = label_with_face_detector_original(frame, net, model, le, args['confidence'])
# show the output frame and wait for a key press
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
def classify_images(location, detector, model, le, confidence=0.9):
"""
From an image folder location:
1. Create real and fake image folders in the current image folder itself (only if such folders do not already exist).
2. Classify the images into real and fake and store them within the created folders.
"""
args = {
'detector': detector,
'model': model,
'le': le,
'confidence': confidence
}
# Create Folders
real_location = os.path.join(INTERIM_DATA_DIR, location, 'real')
fake_location = os.path.join(INTERIM_DATA_DIR, location, 'fake')
noface_location = os.path.join(INTERIM_DATA_DIR, location, 'noface')
if not glob(real_location):
os.mkdir(real_location)
if not glob(fake_location):
os.mkdir(fake_location)
if not glob(noface_location):
os.mkdir(noface_location)
# Load Models
# Load our serialized face detector from disk
print("[INFO] loading face detector...")
face_detector_path = os.path.join(DETECTORS_DIR, args['detector'])
protoPath = find_model(face_detector_path, 'prototxt')
modelPath = find_model(face_detector_path, "caffemodel")
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# Load the liveness detector model and label encoder from disk
print("[INFO] loading liveness detector...")
classifiermodelpath = os.path.join(NN_MODELS_DIR, args['model'])
model = load_model(classifiermodelpath)
le = pickle.loads(
open(os.path.join(LABELS_DIR, args["le"]), "rb").read())
# Grab all images from given folder
unsorted_folder = os.path.join(EXTERNAL_DATA_DIR, location)
images = glob(os.path.join(unsorted_folder, '*.png'))
jpg_images = glob(os.path.join(unsorted_folder, '*.jpg'))
images.extend(jpg_images)
# Maintain counters for all types of images
real_counter = 0
fake_counter = 0
noface_counter = 0
bar = tqdm(images, dynamic_ncols=True, desc='Bar desc', leave=True)
for image in bar:
frame = cv2.imread(image)
if args['detector'] == 'face_RFB':
frame, finally_fake, detected_faces = label_with_face_detector_ultra(frame, net, model, le, args['confidence'], use_video=False)
else:
frame, finally_fake, detected_faces = label_with_face_detector_original(frame, net, model, le, args['confidence'], use_video=False)
# Relocate the image based on whether it is fake, real or noface
image_name = os.path.basename(image)
if detected_faces == 0:
image_location = os.path.join(noface_location, image_name)
noface_counter += 1
elif finally_fake:
image_location = os.path.join(fake_location, image_name)
fake_counter += 1
else:
image_location = os.path.join(real_location, image_name)
real_counter += 1
# Shift image to classified location
cv2.imwrite(image_location, frame)
# Delete image from unsorted location
os.remove(image)
image_folder_location = os.path.split(image_location)[0]
image_category = os.path.split(image_folder_location)[1]
bar.set_description(os.path.join(image_category, image_name))
bar.refresh()
logger.info('Real Images Classified: %s' % real_counter)
logger.info('Fake Images Classified: %s' % fake_counter)
logger.info('No Face Images Classified: %s' % noface_counter)
# Count present images in each folder location
total_real = len(glob(os.path.join(real_location, '*')))
total_fake = len(glob(os.path.join(fake_location, '*')))
total_noface = len(glob(os.path.join(noface_location, '*')))
logger.info('Real Images Present: %s' % total_real)
logger.info('Fake Images Present: %s' % total_fake)
logger.info('No Face Images Present: %s' % total_noface)
def classify_images_s3_local(s3bucket, s3folderpath, detector, model, le, confidence=0.5):
"""
This is a simplified function where we will:
1. Download the s3 photos
2. Classify the photos locally
3. Delete the photos that are not spoofed
Repeat the process till all s3 images are done.
"""
pass
def classify_images_s3(s3bucket, s3folderpath, detector, model, le, confidence=0.5):
"""
This function will take in the location to the current s3 image folder, and separate the images within into folders within the given folder
i.e. s3 image folder --> hubble-datalake/images/profilepics
real images --> hubble-datalake/images/profilepics/real
fake images --> hubble-datalake/images/profilepics/fake
"""
pass
# --------------------------------------------------------------------------------------------------------------------------
# Sub Functions
# --------------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
fire.Fire()
```
#### File: src/modules/config.py
```python
import os
import logging
from glob import glob
# Logger --------------------------------------------------------------------------------------
try:
import colorlog
HAVE_COLORLOG = True
except ImportError:
HAVE_COLORLOG = False
def create_logger():
"""
Setup the logging environment
"""
log = logging.getLogger() # root logger
log.setLevel(logging.INFO)
format_str = '%(asctime)s - %(levelname)-8s - %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
if HAVE_COLORLOG and os.isatty(2):
cformat = '%(log_color)s' + format_str
colors = {'DEBUG': 'reset',
'INFO': 'reset',
'WARNING': 'bold_yellow',
'ERROR': 'bold_red',
'CRITICAL': 'bold_red'}
formatter = colorlog.ColoredFormatter(cformat, date_format,
log_colors=colors)
else:
formatter = logging.Formatter(format_str, date_format)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
return logging.getLogger(__name__)
logger = create_logger()
# S3 LOCATIONS -------------------------------------------------------------------------------------------
DATALAKE_NAME = 'hubble-datalake1'
PROFILEIMG_FOLDER = 'images/profile_pics'
# LOCAL WORKING DIRECTORIES --------------------------------------------------------------------------------
WORKING_DIR = '/Users/jindongyang/Documents/repos/hubble/hubble_projects/hubble_spoofing_detection'
## DATA DIRECTORIES
EXTERNAL_DATA_DIR = os.path.join(WORKING_DIR, 'data/external')
INTERIM_DATA_DIR = os.path.join(WORKING_DIR, 'data/interim')
PROCESSED_DATA_DIR = os.path.join(WORKING_DIR, 'data/processed')
## MODELS DIRECTORIES
MODELS_DIR = os.path.join(WORKING_DIR, 'models')
NN_MODELS_DIR = os.path.join(MODELS_DIR, 'nn_models')
NN_WEIGHTS_DIR = os.path.join(MODELS_DIR, 'nn_pretrained_weights')
LABELS_DIR = os.path.join(MODELS_DIR, 'labels')
DETECTORS_DIR = os.path.join(MODELS_DIR, 'detectors')
def find_model(folder, extension):
"""
This assumes there is only one file of each extension in each folder, which should be kept the case at all times.
"""
extension = '*.' + extension
matches = glob(os.path.join(folder, extension))
return matches[0]
## REPORTS DIRECTORIES
REPORTS_DIR = os.path.join(WORKING_DIR, 'reports')
FIGURES_DIR = os.path.join(REPORTS_DIR, 'figures')
```
#### File: src/modules/nn_train_helper.py
```python
import os
from keras import backend as K
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Activation, Dense, Dropout, Flatten
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from modules.config import NN_WEIGHTS_DIR
class Model:
def __init__(self, width, height, depth, classes):
self.inputShape = (height, width, depth)
self.chanDim = -1
# if we are using "channels first", update the input shape
# and channels dimension
if K.image_data_format() == "channels_first":
self.inputShape = (depth, height, width)
self.chanDim = 1
self.classes = classes
def build_liveness(self):
# initialize the model along with the input shape to be
# "channels last" and the channels dimension itself
model = Sequential()
# first CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(16, (3, 3), padding="same",
input_shape=self.inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=self.chanDim))
model.add(Conv2D(16, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=self.chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# second CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=self.chanDim))
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=self.chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# first (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(64))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# softmax classifier
model.add(Dense(self.classes))
model.add(Activation("softmax"))
# return the constructed network architecture
return model
def build_VGG(self):
# build the VGG16 network
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=self.inputShape))
model.add(Conv2D(64, (3, 3), activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(64, (3, 3), activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
weights_path = os.path.join(NN_WEIGHTS_DIR, 'vgg16_weights.h5')
model.load_weights(weights_path, by_name=True)
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(self.classes, activation='sigmoid'))
# add the model on top of the convolutional base
model.add(top_model)
return model
``` |
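A hedged usage sketch of the `Model` wrapper above; the import path is assumed from the other files in this repository (`modules.nn_train_helper`) and the 32x32x3 input size is only an example:

```python
from modules.nn_train_helper import Model  # assumed module path

net = Model(width=32, height=32, depth=3, classes=2).build_liveness()
net.summary()  # ends in Dense(2) + softmax over the real/fake classes
```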
{
"source": "jindongyang94/docker-kubernetes-airflow",
"score": 3
} |
#### File: db_migration/db_migration_to_s3/daily_migration.py
```python
import subprocess
import os
import re
import csv
import contextlib
from datetime import datetime, timedelta
import time
import progressbar
import boto3
import psycopg2
from db_migration.db_migration_lib.helper import RDSHelper, S3Helper, PGHelper, DATALAKE_NAME, logger, DATABASE_TAGS, INSTANCE_TAGS, TABLE_TAGS
"""
The idea of this script is to find the respective database instances using Boto3, and then find the
respective databases in the instance and finally find the respective tables in each database and do a iterative
export and dump one table at a time to prevent overloading of memory.
This process can be expedited by parallel processing, but I am unsure of how to do so yet. I will figure out a way
if this becomes a pertinent issue.
Upload the file downloaded to s3 to the correct respective folders and buckets based on company
name. It is important to note that the files with the same name would be replaced. This would
help in not saving redundant files but might not be useful if we want to version.
Since tables can never be appended to directly from S3, it does not make sense to load the entire CSV every time.
Perhaps write another script to periodically merge the CSVs based on time.
S3 files would be named as follows:
s3://{BucketName}/{InstanceName}/{DBName}/{TableName}/{TableName-TimeStamp}.csv
# This method allows me to connect to export csv files for each table.
# This method does not require the maintenance of a JSON file at all, just different AWS credentials
# needed for different servers if different users have different access to the databases.
"""
# List Individual DBs instance and their respective Database List -----------------------------------------------
def describe_all_instances():
rds = RDSHelper()
dbs = rds.describe_db_instances(filters=INSTANCE_TAGS)
db_dictionary = {}
for db in dbs:
instance = db['DBInstanceIdentifier']
user = db['MasterUsername']
endpoint = db['Endpoint']
host = endpoint['Address']
port = endpoint['Port']
location = str(db['DBInstanceArn'].split(':')[3])
logger.info("Accessing instance %s ..." % instance)
pg = PGHelper(dbname='postgres', host=host, port=port, user=user)
con = pg.conn()
cur = con.cursor()
def extract_name_query(title, qry):
logger.info('%s' % (title))
cur.execute(qry)
results = cur.fetchall()
result_names = list(map(lambda x: x[0], results))
return result_names
# List all available databases in the same instance
database_names = extract_name_query(
'Extracting databases...', 'SELECT * FROM pg_database')
# Filtering available databases
default_databases = ['postgres',
'rdsadmin', 'template1', 'template0']
database_names = list(
filter(lambda x: x not in default_databases, database_names))
if DATABASE_TAGS:
database_names = list(
filter(lambda x: x in DATABASE_TAGS, database_names))
# Save all the information based on key: DBInstance, value: [db, [list of databases extracted from the instance]]
db_dictionary[instance] = [db, database_names]
return db_dictionary
# Individual Company Database Migration -----------------------------------------------
def individual_company_migration(instance_details, database_name, table_filters):
instance = instance_details['DBInstanceIdentifier']
user = instance_details['MasterUsername']
endpoint = instance_details['Endpoint']
host = endpoint['Address']
port = endpoint['Port']
location = str(instance_details['DBInstanceArn'].split(':')[3])
pg = PGHelper(dbname='postgres', host=host, port=port, user=user, type_db='prod')
logger.info("Accessing %s ..." % database_name)
con = pg.conn(database=database_name)
cur = con.cursor()
def extract_name_query(title, qry):
logger.info('%s' % (title))
cur.execute(qry)
results = cur.fetchall()
result_names = list(map(lambda x: x[0], results))
return result_names
# List all available tables in the same instance
table_query = "SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE'"
table_names = extract_name_query('Extracting tables...', table_query)
# Filtering available tables
if table_filters:
table_names = list(
filter(lambda x: x in table_filters, table_names))
# We should also filter away those tables that does not start with hubble as well: ['delayed_jobs', 'ar_internal_metadata', 'schema_migrations', 'audits']
# We are going to remove hubble_safety_permit_logs as well as it is too big to be exported at the moment.
misc_tables = ['delayed_jobs', 'ar_internal_metadata', 'schema_migrations', 'audits', 'hubble_safety_permit_logs']
table_names = list(
filter(lambda x: x not in misc_tables, table_names)
)
logger.info("Tables List: %s" % table_names)
# for table_name in table_names:
for j in range(len(table_names)):
table_name = table_names[j]
# # Rerun for the table when the exception fails
# try:
# Save individual tables to CSV first - as we are sending one table at a time, we can del the csv files
# as soon as we have uploaded them
logger.info("Accessing %s ..." % table_name)
# We will save the time based on the latest commit time. Thus, there will be only one file for one table all time
# However, they might be of different timestamp due to difference in commit time.
s3 = S3Helper()
# Extract latest timestamp separately here:
# Use this query to extract the latest commit timestamp at that point of time
extract_ts_query = "SELECT MAX(pg_xact_commit_timestamp(xmin)) FROM " + table_name + " WHERE pg_xact_commit_timestamp(xmin) IS NOT NULL;"
cur.execute(extract_ts_query)
latest_timestamp = str(cur.fetchone()[0])
# Define needed timestamp to set the csvname we are using.
if latest_timestamp and latest_timestamp != 'None':
logger.info ("Latest Commit Timestamp from PostGres is: %s" % latest_timestamp)
latest_csvtimestamp = s3._convert_s3timestamp(latest_timestamp)
# However, if there is no timestamp at all, then use 24 '0's as the default.
else:
logger.info ("No Commit Timestamp available in PostGres. Using default.")
latest_csvtimestamp = '0' * 24
csvname = table_name + "-" + latest_csvtimestamp + ".csv"
local_csvname = database_name + "-" + csvname
# Respective paths needed
full_folder_path = ("%s/%s/%s/%s") % (DATALAKE_NAME, instance, database_name, table_name)
full_table_path = "%s/%s/%s/%s/%s" % (DATALAKE_NAME, instance, database_name, table_name, csvname)
s3_path = ("s3://%s") % (full_table_path)
# Grab the latest_timestamp from the folder. Ideally, there should only be one file under each table folder, but
# we will still segregate them as such for easy referencing.
table_timestamp = s3.latest_s3timestamp(full_folder_path)
# If we could not get a proper timestamp from s3, it means there is no initial file.
if not table_timestamp:
logger.info ("No CSV found in the respective S3 folder. Exporting all rows from table %s to csv." % table_name)
local_csvpath = '/tmp/' + local_csvname
with open(local_csvpath, "w") as csvfile:
# Get all of the rows and export them
export_query = "COPY " + table_name + " TO STDOUT WITH CSV HEADER"
cur.copy_expert(export_query, csvfile)
else:
logger.info ("CSV File found with Commit Timestamp: %s." % table_timestamp)
# Since the timestamp is down to the last millisecond, it is almost impossible for it to miss any rows.
# Thus, to save processing time, we shall ignore any need to update the table csv if the timestamp is the same.
table_csvtimestamp = s3._convert_s3timestamp(table_timestamp)
if table_csvtimestamp == latest_csvtimestamp:
logger.info ("The latest Commit Timestamp (%s) and the latest S3 Timestamp (%s) are the same. Proceeeding to next table."
% (latest_timestamp, table_timestamp))
logger.info('\n')
continue
# If timestamp is 0000.. , we should just use the min datetime to prevent error.
if table_csvtimestamp == '0' * 24:
table_timestamp = datetime.min
# Get only the rows after the committed timestamp retrieved and append that to the current csv.
# If there is no results, just go to the next table
export_query = "SELECT * FROM " + table_name + " WHERE pg_xact_commit_timestamp(xmin) > %s "
cur.execute(export_query, (table_timestamp,))
results = cur.fetchall()
if not results:
logger.info ("No new rows or updates from the current Database.")
logger.info('\n')
continue
# Download the file to local storage first, then utilizing it - always save it under /tmp/ directory
# The file will also be deleted from s3
local_csvpath = s3.download_latest(full_folder_path, local_csvname)
with open(local_csvpath, 'a') as csvfile:
# Append by downloading the existing csv and append locally.
logger.info ("Writing rows into current local CSV File...")
for row in results:
writer = csv.writer(csvfile)
writer.writerow(row)
# Upload the file to the respective bucket - Replacing or uploading uses the same function
# This way of uploading would not resetting the entire path, so it is fine to not add a check.
s3.create_folder(full_folder_path, location)
s3.upload(local_csvpath, full_table_path)
latest_timestamp = s3._convert_timestamp(latest_csvtimestamp)
logger.info ('FILE PUT AT: %s with Latest Committed Time (%s)' % (s3_path, latest_timestamp))
# Deleting file from /tmp/ after use
os.remove(local_csvpath)
logger.info ('Local File Deleted: %s' % local_csvpath)
logger.info('\n')
continue
# except psycopg2.Error as e:
# logger.error(e.pgerror)
# logger.info("Retrying for %s table." % table_name)
# logger.info('\n')
# continue
return
# Full Program to Run Locally-----------------------------------------------
def full_database_migration(instance_filters=None, database_filters=None, table_filters=None):
"""
-instance_filters (dict): for now it can be anything we are going to use to filter the instance:
1. db-cluster-id 2. db-instance-id
A filter name and value pair that is used to return a more specific list of results from a describe operation.
Filters can be used to match a set of resources by specific criteria, such as IDs.
The filters supported by a describe operation are documented with the describe operation.
E.g. [{"Name" :"tag:keyname", "Values":[""] }] - Must explicitly specify "Names" and "Values" pair.
-database_filters (list): simply only append the database names to this list so we only access those databases. By default,
it will access all
-table_filters (list): simply only append table names to this list so we only export those tables. By default it will export all.
"""
# Initiate RDS instance helper to iterate through RDS
rds = RDSHelper()
dbs = rds.describe_db_instances(filters=instance_filters)
logger.info ("Instances List: %s" % list(map(lambda x: x['DBInstanceIdentifier'], dbs)))
for db in dbs:
instance = db['DBInstanceIdentifier']
user = db['MasterUsername']
endpoint = db['Endpoint']
host = endpoint['Address']
port = endpoint['Port']
location = str(db['DBInstanceArn'].split(':')[3])
logger.info('instance: %s' % instance)
logger.info('user: %s' % user)
logger.info('endpoint: %s' % endpoint)
logger.info('host: %s' % host)
logger.info('port: %s' % port)
logger.info('location: %s' % location)
logger.info ("Accessing instance %s ..." % instance)
pg = PGHelper(dbname='postgres', host=host, port=port, user=user)
con = pg.conn()
cur = con.cursor()
def extract_name_query(title, qry):
logger.info('%s' % (title))
cur.execute(qry)
results = cur.fetchall()
result_names = list(map(lambda x: x[0], results))
return result_names
# List all available databases in the same instance
database_names = extract_name_query(
'Extracting databases...', 'SELECT * FROM pg_database')
# Filtering available databases
default_databases = ['postgres', 'rdsadmin', 'template1', 'template0']
database_names = list(
filter(lambda x: x not in default_databases, database_names))
if database_filters:
database_names = list(
filter(lambda x: x in database_filters, database_names))
logger.info("Databases List: %s" % database_names)
# for i in progressbar.progressbar(range(len(database_names))):
for database_name in database_names:
# database_name = database_names[i]
# Change database connection
logger.info("Accessing %s ..." % database_name)
con = pg.conn(database=database_name)
cur = con.cursor()
# List all available tables in the same instance
table_query = "SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE'"
table_names = extract_name_query('Extracting tables...', table_query)
# Filtering available tables
if table_filters:
table_names = list(
filter(lambda x: x in table_filters, table_names))
# We should also filter away those tables that does not start with hubble as well: ['delayed_jobs', 'ar_internal_metadata', 'schema_migrations', 'audits']
# We are going to remove hubble_safety_permit_logs as well as it is too big to be exported at the moment.
misc_tables = ['delayed_jobs', 'ar_internal_metadata', 'schema_migrations', 'audits', 'hubble_safety_permit_logs']
table_names = list(
filter(lambda x: x not in misc_tables, table_names)
)
logger.info("Tables List: %s" % table_names)
progressbar.streams.wrap_stderr()
# for table_name in table_names:
for j in progressbar.progressbar(range(len(table_names))):
table_name = table_names[j]
# Rerun for the table when the exception fails
while True:
try:
# Save individual tables to CSV first - as we are sending one table at a time, we can del the csv files
# as soon as we have uploaded them
logger.info("Accessing %s ..." % table_name)
# We will save the time based on the latest commit time. Thus, there will be only one file for one table all time
# However, they might be of different timestamp due to difference in commit time.
s3 = S3Helper()
# Extract latest timestamp separately here:
# Use this query to extract the latest commit timestamp at that point of time
extract_ts_query = "SELECT MAX(pg_xact_commit_timestamp(xmin)) FROM " + table_name + " WHERE pg_xact_commit_timestamp(xmin) IS NOT NULL;"
cur.execute(extract_ts_query)
latest_timestamp = str(cur.fetchone()[0])
# Define needed timestamp to set the csvname we are using.
if latest_timestamp and latest_timestamp != 'None':
logger.info ("Latest Commit Timestamp from PostGres is: %s" % latest_timestamp)
latest_csvtimestamp = s3._convert_s3timestamp(latest_timestamp)
# However, if there is no timestamp at all, then use 24 '0's as the default.
else:
logger.info ("No Commit Timestamp available in PostGres. Using default.")
latest_csvtimestamp = '0' * 24
csvname = table_name + "-" + latest_csvtimestamp + ".csv"
# Respective paths needed
full_folder_path = ("%s/%s/%s/%s") % (DATALAKE_NAME, instance, database_name, table_name)
full_table_path = "%s/%s/%s/%s/%s" % (DATALAKE_NAME, instance, database_name, table_name, csvname)
s3_path = ("s3://%s") % (full_table_path)
# Grab the latest_timestamp from the folder. Ideally, there should only be one file under each table folder, but
# we will still segregate them as such for easy referencing.
table_timestamp = s3.latest_s3timestamp(full_folder_path)
# If we could not get a proper timestamp from s3, it means there is no initial file.
if not table_timestamp:
logger.info ("No CSV found in the respective S3 folder. Exporting all rows from table %s to csv." % table_name)
local_csvpath = '/tmp/' + csvname
with open(local_csvpath, "w") as csvfile:
# Get all of the rows and export them
export_query = "COPY " + table_name + " TO STDOUT WITH CSV HEADER"
cur.copy_expert(export_query, csvfile)
else:
logger.info ("CSV File found with Commit Timestamp: %s." % table_timestamp)
# Since the timestamp is down to the last millisecond, it is almost impossible for it to miss any rows.
# Thus, to save processing time, we shall ignore any need to update the table csv if the timestamp is the same.
table_csvtimestamp = s3._convert_s3timestamp(table_timestamp)
if table_csvtimestamp == latest_csvtimestamp:
logger.info ("The latest Commit Timestamp (%s) and the latest S3 Timestamp (%s) are the same. Proceeeding to next table."
% (latest_timestamp, table_timestamp))
logger.info('\n')
break
# If timestamp is 0000.. , we should just use the min datetime to prevent error.
if table_csvtimestamp == '0' * 24:
table_timestamp = datetime.min
# Get only the rows after the committed timestamp retrieved and append that to the current csv.
# If there is no results, just go to the next table
export_query = "SELECT * FROM " + table_name + " WHERE pg_xact_commit_timestamp(xmin) > %s "
cur.execute(export_query, (table_timestamp,))
results = cur.fetchall()
if not results:
logger.info ("No new rows or updates from the current Database.")
logger.info('\n')
break
# Download the file to local storage first, then utilizing it - always save it under /tmp/ directory
# The file will also be deleted from s3
local_csvpath = s3.download_latest(full_folder_path)
with open(local_csvpath, 'a') as csvfile:
# Append by downloading the existing csv and append locally.
logger.info ("Writing rows into current local CSV File...")
for row in results:
writer = csv.writer(csvfile)
writer.writerow(row)
# Upload the file to the respective bucket - Replacing or uploading uses the same function
# This way of uploading would not resetting the entire path, so it is fine to not add a check.
s3.create_folder(full_folder_path, location)
s3.upload(local_csvpath, full_table_path)
latest_timestamp = s3._convert_timestamp(latest_csvtimestamp)
logger.info ('FILE PUT AT: %s with Latest Committed Time (%s)' % (s3_path, latest_timestamp))
# Deleting file from /tmp/ after use
os.remove(local_csvpath)
logger.info ('Local File Deleted')
logger.info('\n')
break
except psycopg2.Error as e:
logger.error(e.pgerror)
logger.info("Retrying for %s table." % table_name)
logger.info('\n')
continue
# Handler to Accomodate to Lambda Context Manager-----------------------------------------------
def handler(event=None, context=None):
# Start Time
start = time.time()
# The tag or name of the instance we want to enter
# test_server = 'arn:aws:rds:ap-southeast-1:160830294233:db:companya'
instance_tags = INSTANCE_TAGS
# The given companies
# database_tags = ['companyaworkers']
database_tags = DATABASE_TAGS
# The related modules needed
# correct_tables = []
full_database_migration(instance_filters=instance_tags, database_filters=database_tags)
end = time.time()
seconds = end - start
time_spent = str(timedelta(seconds=seconds))
logger.info("Time Spent on Script: %s" % time_spent)
if __name__ == "__main__":
handler()
``` |
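The S3 layout documented at the top of the script, `s3://{BucketName}/{InstanceName}/{DBName}/{TableName}/{TableName-TimeStamp}.csv`, can be reproduced in isolation. The instance, database and table names below are example values, and the 24-zero timestamp is the default used when no commit timestamp exists:

```python
datalake, instance, database, table = "hubble-datalake1", "companya", "companyaworkers", "hubble_workers"
csv_timestamp = "0" * 24  # default when pg_xact_commit_timestamp returns nothing
csvname = "{}-{}.csv".format(table, csv_timestamp)
s3_path = "s3://{}/{}/{}/{}/{}".format(datalake, instance, database, table, csvname)
print(s3_path)
# s3://hubble-datalake1/companya/companyaworkers/hubble_workers/hubble_workers-000000000000000000000000.csv
```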
{
"source": "jindongyang94/sample_luigipipeline",
"score": 3
} |
#### File: src/validation/test.py
```python
import pandas
from luigi_ml_Pipeline import Train
from sklearn.externals import joblib
from feature_builder import FeatureBuilder
from regressor import test_model_ridge
def test():
df_test = pandas.read_csv("/tmp/test.csv")
df_stores = pandas.read_csv("/tmp/rossman_sales_train_stores.csv")
df = pandas.merge(df_test, df_stores, on='Store')
df["Sales"] = 0
sales_model = joblib.load(Train().output().path)
fb = FeatureBuilder(df)
df = fb.featurize()
print(df.columns)
print(test_model_ridge(df, sales_model))
if __name__ == '__main__':
test()
``` |
{
"source": "jindrahelcl/puzzlehunt-tools",
"score": 3
} |
#### File: puzzlehunt-tools/character_ngram/scorer.py
```python
import argparse
import logging
import sys
from ngram_model import SmoothedNGramModel
from string_util import preprocess
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO, datefmt='%H:%M:%S')
logger = logging.getLogger(__name__)
def main(args):
logger.info("Hello! This is %s", sys.argv[0])
model = SmoothedNGramModel.load(args.model)
logger.info(
"%d-gram model file %s loaded, scoring", model.order, args.model)
preprocess_fn = lambda x: preprocess(x, args.normalize, args.lowercase,
args.add_blanks, model.order)
for i, line in enumerate(args.input):
score = model.score(preprocess_fn(line))
print(score, file=args.output)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"model", metavar="MODEL_FILE", type=str, help="Model file")
parser.add_argument(
"input", nargs="?", metavar="INPUT", default=sys.stdin,
help="Input file. Plaintext. Default stdin.",
type=argparse.FileType("r"))
parser.add_argument(
"output", nargs="?", metavar="OUTPUT", default=sys.stdout,
type=argparse.FileType("w"))
parser.add_argument(
"--normalize", default=True, type=bool,
help="Normalize unicode to ascii")
parser.add_argument(
"--lowercase", default=True, type=bool,
help="Lowercase training data")
parser.add_argument(
"--add-blanks", default=True, type=bool,
help="Circumfix the lines with blank characters")
p_args = parser.parse_args(sys.argv[1:])
main(p_args)
```
#### File: puzzlehunt-tools/character_ngram/subcli.py
```python
import os
import sys
class Subcommand(object):
def __init__(self):
self.commands = {}
def __call__(self, name):
def reg_command(fn):
self.commands[name] = fn
return fn
return reg_command
def run(self, argv, failcb):
name = argv[1] if len(argv) >= 2 else None
command = self.commands.get(name)
if command:
try:
command(argv)
except BrokenPipeError:
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
sys.exit(2)
else:
width = max(len(cmd) for cmd in self.commands) + 2
failcb(
"Usage: {} COMMAND [--help | [--] ARG...]\n\n"
"Available COMMANDs:\n{}"
.format(
argv[0],
"\n".join(
" {{:<{}}}{{}}".format(width).format(cmd, fn.__doc__)
for cmd, fn in sorted(self.commands.items())
)
)
)
```
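A hypothetical usage of the `Subcommand` registry above; the command name and callback are invented, and the import assumes the class lives in a module named `subcli` as in the file path:

```python
import sys
from subcli import Subcommand  # assumed import path

subcommand = Subcommand()

@subcommand("hello")
def hello(argv):
    """Print a greeting."""
    print("hello", argv[2:])

if __name__ == "__main__":
    subcommand.run(sys.argv, failcb=sys.exit)
```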
#### File: puzzlehunt-tools/subseq_cracker/generator.py
```python
import sys
from utils import *
def get_generator(classname):
try:
return getattr(sys.modules[__name__], classname)
except AttributeError:
logger.error(f"Unknown generator: '{classname}'.")
return None
class Generator:
def __iter__(self):
pass
class Tmou(Generator):
def __init__(self):
pass
def __iter__(self):
yield from sys.stdin
class SimpleGenerator(Generator):
# do not change, used for examining a single string
def __init__(self, letters):
self.letters = letters
def __iter__(self):
yield self.letters
class GenPotrati(Generator):
def __init__(self):
self.a = ["návěstidlo", "drátytelegrafníhovedení", "potůček", "přístupovácesta", "cestička" , "vozidlo", "vextrovna"]
self.b = [(4,9), (6,6), (5,1), (7,11), (6,12), (7,3), (3,2), (4,8), (5,7), (7,13), (4,10), (5,14), (6,4), (8,5)]
def __iter__(self):
for perm in gen_perms(self.a):
seq = []
for l, word in enumerate(perm):
for i in range(2):
coord = self.b[l*2+i]
char_idx = coord[0]-1
if len(word) <= char_idx:
continue
seq.append((word[char_idx], coord[1]))
if len(seq) != len(self.b):
continue
seq.sort(key=lambda x: x[1])
seq = [x[0] for x in seq]
yield "".join(seq)
``` |
{
"source": "jindrichsamec/kontejnery",
"score": 3
} |
#### File: containers/models/term.py
```python
from datetime import datetime
from database import db
class Term(db.Model):
__tablename__ = 'terms'
id = db.Column(db.Integer, primary_key=True)
container_id = db.Column(db.Integer, db.ForeignKey('containers.id'), nullable=False)
container = db.relationship('Container', back_populates="terms")
datetime_from = db.Column(db.DateTime, nullable=False)
datetime_to = db.Column(db.DateTime, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, default=datetime.now)
updated_at = db.Column(db.DateTime, nullable=False, default=datetime.now, onupdate=datetime.now)
def __repr__(self):
return '<Term: (id: {}) {} - {}>'.format(self.id, self.datetime_from, self.datetime_to)
```
#### File: jindrichsamec/kontejnery/json_encoder.py
```python
from datetime import datetime
from flask.json import JSONEncoder
import flask.ext.restful.representations.json
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
try:
            if isinstance(obj, datetime):
return obj.isoformat()
except TypeError:
pass
return JSONEncoder.default(self, obj)
def register_encoder(app):
app.config['RESTFUL_JSON'] = {'cls': CustomJSONEncoder}
```
#### File: jindrichsamec/kontejnery/logger.py
```python
import sys
import logging
def register_logger(app):
formatter = logging.Formatter("[%(levelname)s] %(asctime)s - %(message)s")
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(formatter)
for h in app.logger.handlers:
app.logger.removeHandler(h)
app.logger.addHandler(handler)
app.logger.setLevel(logging.INFO)
```
#### File: migrations/versions/57b1d6fb5b9a_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '57b1d6fb5b9a'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('containers', sa.Column('created_at', sa.DateTime()))
op.add_column('containers', sa.Column('updated_at', sa.DateTime()))
op.add_column('terms', sa.Column('created_at', sa.DateTime()))
op.add_column('terms', sa.Column('updated_at', sa.DateTime()))
op.execute('UPDATE terms SET created_at = NOW(), updated_at= now()')
op.execute('UPDATE containers SET created_at = NOW(), updated_at= now()')
    op.alter_column('containers', 'created_at', nullable=False)
    op.alter_column('containers', 'updated_at', nullable=False)
    op.alter_column('terms', 'created_at', nullable=False)
    op.alter_column('terms', 'updated_at', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('terms', 'updated_at')
op.drop_column('terms', 'created_at')
op.drop_column('containers', 'updated_at')
op.drop_column('containers', 'created_at')
# ### end Alembic commands ###
```
#### File: migrations/versions/c45a6b3de513_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c45a6b3de513'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('containers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('slug', sa.String(length=255), nullable=False),
sa.Column('position', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_containers_position'), 'containers', ['position'], unique=False)
op.create_table('terms',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('container_id', sa.Integer(), nullable=False),
sa.Column('datetime_from', sa.DateTime(), nullable=False),
sa.Column('datetime_to', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['container_id'], ['containers.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('terms')
op.drop_index(op.f('ix_containers_position'), table_name='containers')
op.drop_table('containers')
### end Alembic commands ###
```
#### File: migrations/versions/ee2aa0d04791_.py
```python
from alembic import op
import sqlalchemy as sa
import geoalchemy2
# revision identifiers, used by Alembic.
revision = 'ee2aa0d04791'
down_revision = 'c45a6b3de513'
branch_labels = None
depends_on = None
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# op.drop_table('spatial_ref_sys')
op.add_column('containers', sa.Column('coordinates', geoalchemy2.types.Geography(geometry_type='POINT', srid=4326), nullable=True))
op.drop_index('ix_containers_position', table_name='containers')
op.drop_column('containers', 'position')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('containers', sa.Column('position', sa.VARCHAR(), autoincrement=False, nullable=True))
op.create_index('ix_containers_position', 'containers', ['position'], unique=False)
op.drop_column('containers', 'coordinates')
op.create_table('spatial_ref_sys',
sa.Column('srid', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('auth_name', sa.VARCHAR(length=256), autoincrement=False, nullable=True),
sa.Column('auth_srid', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('srtext', sa.VARCHAR(length=2048), autoincrement=False, nullable=True),
sa.Column('proj4text', sa.VARCHAR(length=2048), autoincrement=False, nullable=True),
sa.CheckConstraint('(srid > 0) AND (srid <= 998999)', name='spatial_ref_sys_srid_check'),
sa.PrimaryKeyConstraint('srid', name='spatial_ref_sys_pkey')
)
### end Alembic commands ###
```
#### File: containers/controls/test_crawler.py
```python
import pytest
from containers.controls.crawler import *
normalize_name_test_data = [
("U Sluncové", 'u-sluncove'),
("U Sluncové x Za Invalidovnou (parkoviště)", 'u-sluncove-x-za-invalidovnou'),
("U Sluncové x Za Invalidovnou (parkoviště) (Kobylisy)", 'u-sluncove-x-za-invalidovnou-parkoviste'),
("Pekařova x Jestřebická (Kobylisy)", 'pekarova-x-jestrebicka'),
]
@pytest.mark.parametrize("name, expected", normalize_name_test_data)
def test_normalize_name(name, expected):
assert expected == normalize_name(name)
``` |
{
"source": "jinedai/zhihu_question",
"score": 2
} |
#### File: jinedai/zhihu_question/mi.py
```python
import os,urllib,urllib2,re
import socket
import json
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
header = {
'Device-Id': 'ffffffff-c690-520f-47dc-5a4708a11582',
    'Cookie': 'serviceToken=/<KEY>',
'Network-Stat': 'wifi',
'Mishop-Client-Id': '180100031052',
'Screen-width-px': '1080',
'Mishop-Client-VersionName': '4.2.7.0801.r1',
'Accept-Encoding': 'gzip',
'Mishop-Client-VersionCode': '20170801',
'Mishop-Auth': '<PASSWORD>;<PASSWORD>',
'Mishop-Model': 'MI 5s',
'Uuid': '5d60fbc2-372a-f65e-310b-d201f2d3c564',
'Screen-DensityDpi': '480',
'Mishop-Is-Pad': '0',
'device': '5rqXkVAmt5lWh975gaRttg==',
'Android-Ver': '23',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Host': 'api.m.mi.com',
'Connection': 'Keep-Alive',
'User-Agent': 'okhttp/3.4.1'
}
##====================以下为方法========================##
def getContent():
url = 'http://api.m.mi.com/v1/hisearch/se_home'
data = {
'input_word' : '小爱同学',
'query' : '小爱同学',
'page_index' : '1',
'checkbox' : '0',
'page_size' : 20
}
data = urllib.urlencode(data)
req = urllib2.Request(url, data, headers = header)
try:
currentPage=json.loads(urllib2.urlopen(req, timeout=10).read())
return currentPage['data']['red_session']
except urllib2.URLError:
print 'url error'
return None
except socket.error:
print 'socket result'
return None
except Exception,e:
print 'other error'
return None
def getResult(session_id):
url = 'http://api.m.mi.com/v1/misearch/roll'
data = {
'session_id' : session_id,
'query' : '小爱同学',
}
data = urllib.urlencode(data)
try:
req = urllib2.Request(url, data, headers = header)
currentPage= urllib2.urlopen(req, timeout=10).read()
print session_id
except urllib2.URLError:
print 'url error'
except socket.error:
print 'socket result'
except Exception,e:
print 'other error'
if __name__ == '__main__':
i = 1
while (1):
print i
red_session = getContent()
if not red_session is None:
result = getResult(red_session)
i = i + 1
```
#### File: jinedai/zhihu_question/util.py
```python
import urllib2
import gzip
import StringIO
import ConfigParser
def get_content(toUrl,count):
""" Return the content of given url
Args:
toUrl: aim url
count: index of this connect
Return:
content if success
'Fail' if fail
"""
cf = ConfigParser.ConfigParser()
cf.read("config.ini")
cookie = cf.get("cookie", "cookie")
headers = {
'Cookie': cookie,
'Host':'www.zhihu.com',
'Referer':'http://www.zhihu.com/',
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
'Accept-Encoding':'gzip'
}
req = urllib2.Request(
url = toUrl,
headers = headers
)
try:
opener = urllib2.build_opener(urllib2.ProxyHandler())
urllib2.install_opener(opener)
page = urllib2.urlopen(req,timeout = 15)
headers = page.info()
content = page.read()
except Exception,e:
if count % 1 == 0:
print str(count) + ", Error: " + str(e) + " URL: " + toUrl
return "FAIL"
if page.info().get('Content-Encoding') == 'gzip':
data = StringIO.StringIO(content)
gz = gzip.GzipFile(fileobj=data)
content = gz.read()
gz.close()
return content
``` |
{
"source": "jinengandhi-intel/avalon",
"score": 2
} |
#### File: src/libs/submit_request.py
```python
import logging
import json
import time
import os
import config.config as pconfig
from avalon_sdk.connector.direct.jrpc.jrpc_worker_registry import \
JRPCWorkerRegistryImpl
from avalon_sdk.connector.direct.jrpc.jrpc_work_order import \
JRPCWorkOrderImpl
from avalon_sdk.worker.worker_details import \
WorkerType, WorkerStatus
from avalon_sdk.connector.direct.jrpc.jrpc_work_order_receipt \
import JRPCWorkOrderReceiptImpl
from avalon_sdk.connector.blockchains.fabric.fabric_worker_registry \
import FabricWorkerRegistryImpl
from avalon_sdk.connector.blockchains.fabric.fabric_work_order \
import FabricWorkOrderImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_worker_registry \
import EthereumWorkerRegistryImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_work_order \
import EthereumWorkOrderProxyImpl
import avalon_sdk.worker.worker_details as worker_details
from conftest import env
logger = logging.getLogger(__name__)
def config_file_read():
tcf_connector_conffile = [env['tcf_connector_conffile']]
confpaths = [env['confpaths']]
config = pconfig.parse_configuration_files(
tcf_connector_conffile, confpaths)
config["tcf"]["json_rpc_uri"] = env['uri_client_sdk']
return config
def _create_worker_registry_instance(blockchain_type, config):
"""
    This function returns the sdk/proxy implementation class of the worker registry.
"""
if env['proxy_mode'] and blockchain_type == 'fabric':
return FabricWorkerRegistryImpl(config)
elif env['proxy_mode'] and blockchain_type == 'ethereum':
return EthereumWorkerRegistryImpl(config)
else:
return JRPCWorkerRegistryImpl(config)
def _create_work_order_instance(blockchain_type, config):
"""
    This function returns the sdk/proxy implementation class of the work order.
"""
if env['proxy_mode'] and blockchain_type == 'fabric':
return FabricWorkOrderImpl(config)
elif env['proxy_mode'] and blockchain_type == 'ethereum':
return EthereumWorkOrderProxyImpl(config)
else:
return JRPCWorkOrderImpl(config)
def _create_work_order_receipt_instance(blockchain_type, config):
"""
This function returns the sdk implementation class for work order receipt.
Returns None in case of Proxy mode.
"""
if env['proxy_mode'] and blockchain_type == 'fabric':
return None
elif env['proxy_mode'] and blockchain_type == 'ethereum':
# TODO need to implement
return None
else:
return JRPCWorkOrderReceiptImpl(config)
def submit_request_listener(
uri_client, input_json_str):
"""
This function is called from the workorder submit tests.
    It is used to submit the request directly to the listener when the test
    mode is set to listener in env.py.
"""
request_method = input_json_str["method"]
input_json_str = json.dumps(input_json_str)
if request_method == "WorkOrderGetResult":
logger.info("- Validating WorkOrderGetResult Response-")
response = {}
response_timeout_start = time.time()
response_timeout_multiplier = ((6000 / 3600) + 6) * 3
while "result" not in response:
if "error" in response:
if response["error"]["code"] != 5:
logger.info('WorkOrderGetResult - '
'Response received with error code. ')
err_cd = 1
break
response_timeout_end = time.time()
if ((response_timeout_end - response_timeout_start) >
response_timeout_multiplier):
logger.info('ERROR: WorkOrderGetResult response is not \
received within expected time.')
break
response = uri_client._postmsg(input_json_str)
else:
logger.info(
'**********Received Request*********\n%s\n',
input_json_str)
response = uri_client._postmsg(input_json_str)
logger.info(
'**********Submit Request Response *********\n%s\n',
response)
return response
def workorder_submit_sdk(wo_params, input_json_obj=None):
"""
    This function sets up the params for the work order submit API and
    calls the respective SDK as per the direct/proxy mode set in env.py.
"""
if input_json_obj is None:
req_id = 3
else:
req_id = input_json_obj["id"]
config = config_file_read()
work_order = _create_work_order_instance(env['blockchain_type'], config)
logger.info("Work order submit request : %s, \n \n ",
wo_params.to_jrpc_string(req_id))
response = work_order.work_order_submit(
wo_params.get_work_order_id(),
wo_params.get_worker_id(),
wo_params.get_requester_id(),
wo_params.to_string(),
id=req_id
)
if env['proxy_mode'] and (not isinstance(response, dict)):
if response.value == 0:
response = {"error": {"code": 5}}
else:
response = {"error": {"code": response.value}}
response["workOrderId"] = wo_params.get_work_order_id()
logger.info('**********Received Response*********\n%s\n', response)
return response
def worker_lookup_sdk(worker_type, input_json=None):
"""
    This function will send the WorkerLookUp request in the SDK model.
    It handles both the Ethereum and the generic SDK function calls.
"""
logger.info("WorkerLookUp SDK code path\n")
if input_json is None:
jrpc_req_id = 3
else:
jrpc_req_id = input_json["id"]
config = config_file_read()
worker_dict = {'SGX': WorkerType.TEE_SGX,
'MPC': WorkerType.MPC, 'ZK': WorkerType.ZK}
worker_registry = _create_worker_registry_instance(
env['blockchain_type'], config)
if env['blockchain_type'] == "ethereum":
if worker_type in worker_dict.keys():
worker = WorkerType.TEE_SGX
else:
worker = worker_type
worker_lookup_response = worker_registry.worker_lookup(
worker,
config["WorkerConfig"]["OrganizationId"],
config["WorkerConfig"]["ApplicationTypeId"],
jrpc_req_id
)
else:
worker_lookup_response = worker_registry.worker_lookup(
worker_type=worker_dict.get(worker_type, worker_type),
id=jrpc_req_id)
logger.info("\n Worker lookup response: {}\n".format(
json.dumps(worker_lookup_response, indent=4)
))
return worker_lookup_response
def worker_register_sdk(register_params, input_json):
"""
    This function will send the WorkerRegister request in the SDK model.
    It takes care of both the Ethereum function call and the SDK/Fabric
    function call.
"""
logger.info("WorkerRegister SDK code path\n")
    if input_json is None:
        jrpc_req_id = 3
    else:
        jrpc_req_id = input_json["id"]
worker_dict = {'SGX': WorkerType.TEE_SGX,
'MPC': WorkerType.MPC, 'ZK': WorkerType.ZK}
config = config_file_read()
worker_registry = _create_worker_registry_instance(
env['blockchain_type'], config)
if env['proxy_mode'] and (env['blockchain_type'] == "ethereum"):
worker_register_result = worker_registry.worker_register(
register_params["worker_id"],
worker_dict[register_params["workerType"]],
register_params["organization_id"],
register_params["application_type_id"],
json.dumps(register_params["details"]))
else:
worker_register_result = worker_registry.worker_register(
register_params["workerId"],
worker_dict[register_params["workerType"]],
register_params.get("organizationId"),
register_params.get("applicationTypeId"),
json.dumps(register_params["details"]), jrpc_req_id)
if env['proxy_mode'] and (not isinstance(worker_register_result, dict)):
response = worker_register_result.value
worker_register_result = {"error": {"code": response, "message": ""}}
logger.info("\n Worker register response: {}\n".format(
worker_register_result))
return worker_register_result
def worker_setstatus_sdk(set_status_params, input_json):
"""
    This function will send the WorkerSetStatus request in the SDK model.
    It takes care of both the Ethereum function call and the SDK/Fabric
    function call.
"""
logger.info("WorkerSetStatus SDK code path\n")
logger.info("Worker status params %s \n", set_status_params)
if input_json is None:
jrpc_req_id = 3
else:
jrpc_req_id = input_json["id"]
status_dict = {1: WorkerStatus.ACTIVE, 2: WorkerStatus.OFF_LINE,
3: WorkerStatus.DECOMMISSIONED,
4: WorkerStatus.COMPROMISED}
config = config_file_read()
worker_registry = _create_worker_registry_instance(
env['blockchain_type'], config)
if env['proxy_mode'] and (env['blockchain_type'] == "ethereum"):
worker_setstatus_result = worker_registry.worker_set_status(
set_status_params["worker_id"],
status_dict[set_status_params["status"]])
else:
worker_setstatus_result = worker_registry.worker_set_status(
set_status_params["worker_id"],
status_dict[set_status_params["status"]], jrpc_req_id)
if env['proxy_mode']:
result = worker_setstatus_result
worker_setstatus_result = {}
worker_setstatus_result["error"] = {
"code": result.value, "message": ""}
logger.info("\n Worker setstatus response: {}\n".format(
worker_setstatus_result))
return worker_setstatus_result
def worker_retrieve_sdk(worker_id, input_json=None):
"""
    This function will send the WorkerRetrieve request in the SDK model.
    It takes care of both the Ethereum function call and the SDK/Fabric
    function call.
"""
logger.info("WorkerRetrieve SDK code path\n")
worker_obj = worker_details.SGXWorkerDetails()
if input_json is None:
jrpc_req_id = 11
else:
jrpc_req_id = input_json["id"]
config = config_file_read()
worker_registry = _create_worker_registry_instance(
env['blockchain_type'], config)
worker_retrieve_result = worker_registry.worker_retrieve(
worker_id, jrpc_req_id)
if env['proxy_mode']:
if worker_retrieve_result is None:
worker_retrieve_result = {
"error": {
"code": '',
"message": "Worker Id not found"}}
else:
response = worker_retrieve_result
worker_obj.load_worker(json.loads(response[4]))
worker_retrieve_result = {}
result = {"workerType": response[1],
"organizationId": response[2],
"applicationTypeId": response[3],
"details": json.loads(response[4])}
worker_retrieve_result["result"] = result
if "error" in worker_retrieve_result:
logger.error("Unable to retrieve worker details\n")
return worker_retrieve_result
logger.info("\n Worker retrieve response: {}\n".format(
worker_retrieve_result))
worker_obj.worker_id = worker_id
worker_retrieve_result["workerId"] = worker_id
logger.info("\n Worker ID\n%s\n", worker_id)
return worker_retrieve_result
def worker_update_sdk(update_params, input_json=None):
"""
    This function will send the WorkerUpdate request in the SDK model.
    It takes care of both the Ethereum function call and the SDK/Fabric
    function call.
"""
logger.info("WorkerUpdate SDK code path\n")
logger.info("Worker update params %s \n", update_params)
worker_obj = worker_details.SGXWorkerDetails()
# update_params = json.loads(update_params)
if input_json is None:
jrpc_req_id = 11
else:
jrpc_req_id = input_json["id"]
config = config_file_read()
worker_registry = _create_worker_registry_instance(
env['blockchain_type'], config)
if env['proxy_mode'] and (env['blockchain_type'] == "ethereum"):
worker_update_result = worker_registry.worker_update(
update_params["worker_id"],
json.dumps(update_params["details"]))
else:
worker_update_result = worker_registry.worker_update(
update_params["worker_id"],
json.dumps(update_params["details"]), jrpc_req_id)
if env['proxy_mode'] and (not isinstance(worker_update_result, dict)):
response = worker_update_result.value
worker_update_result = {"error": {"code": response, "message": ""}}
logger.info("\n Worker update response: {}\n".format(worker_update_result))
return worker_update_result
def workorder_receiptcreate_sdk(wo_create_receipt, input_json):
"""
    This function will send the WorkOrderReceiptCreate request in the SDK model.
    It takes care of both the Ethereum function call and the SDK/Fabric
    function call.
"""
logger.info("WorkerReceiptCreate SDK code path\n")
jrpc_req_id = input_json["id"]
config = config_file_read()
# Create receipt
wo_receipt = _create_work_order_receipt_instance(
env['blockchain_type'], config)
# Submit work order create receipt jrpc request
wo_receipt_resp = wo_receipt.work_order_receipt_create(
wo_create_receipt["workOrderId"],
wo_create_receipt["workerServiceId"],
wo_create_receipt["workerId"],
wo_create_receipt["requesterId"],
wo_create_receipt["receiptCreateStatus"],
wo_create_receipt["workOrderRequestHash"],
wo_create_receipt["requesterGeneratedNonce"],
wo_create_receipt["requesterSignature"],
wo_create_receipt["signatureRules"],
wo_create_receipt["receiptVerificationKey"],
jrpc_req_id
)
logger.info("Work order create receipt response : {} \n \n ".format(
wo_receipt_resp
))
return wo_receipt_resp
def workorder_receiptretrieve_sdk(workorderId, input_json):
"""
    This function will send the WorkOrderReceiptRetrieve request in the SDK model.
    It takes care of both the Ethereum function call and the SDK/Fabric
    function call.
"""
logger.info("ReceiptRetrieve SDK code path\n")
jrpc_req_id = input_json["id"]
config = config_file_read()
# Create receipt
wo_receipt = _create_work_order_receipt_instance(
env['blockchain_type'], config)
wo_receipt_resp = wo_receipt.work_order_receipt_retrieve(
workorderId, jrpc_req_id)
logger.info("Work order retrieve receipt response : {} \n \n ".format(
wo_receipt_resp
))
# Retrieve last update to receipt by passing 0xFFFFFFFF
jrpc_req_id += 1
receipt_update_retrieve = \
wo_receipt.work_order_receipt_update_retrieve(
workorderId,
None,
1 << 32,
id=jrpc_req_id)
logger.info("\n Last update to receipt receipt is:\n {}".format(
json.dumps(receipt_update_retrieve, indent=4)
))
return wo_receipt_resp
def workorder_getresult_sdk(workorderId, input_json):
"""
    This function will send the WorkOrderGetResult request in the SDK model.
    It takes care of both the Ethereum function call and the SDK/Fabric
    function call.
"""
jrpc_req_id = input_json["id"]
config = config_file_read()
work_order = _create_work_order_instance(env['blockchain_type'], config)
logger.info("----- Validating WorkOrderGetResult Response ------")
get_result_res = work_order.work_order_get_result(
workorderId, jrpc_req_id)
logger.info(
"****** WorkOrderGetResult Received Response*****\n%s\n",
get_result_res)
if env['proxy_mode'] and (get_result_res is None):
get_result_res = {"error": {"code": -1}}
return get_result_res
def workorder_receiptlookup_sdk(requesterId, input_json):
"""
    This function will send the WorkOrderReceiptLookUp request in the SDK model.
    It takes care of both the Ethereum function call and the SDK/Fabric
    function call.
"""
jrpc_req_id = input_json["id"]
config = config_file_read()
wo_receipt = _create_work_order_receipt_instance(
env['blockchain_type'], config)
wo_receipt_resp = wo_receipt.work_order_receipt_lookup(
requester_id=requesterId, id=jrpc_req_id)
logger.info("Work order receipt lookup response : {} \n \n ".format(
wo_receipt_resp
))
return wo_receipt_resp
```
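For context, a minimal sketch of driving `submit_request_listener` directly; the fake client and every field value below are placeholders, and the only assumption about the real client is the `_postmsg` method already used above:
```python
class _FakeUriClient:
    """Stand-in for the JSON-RPC client used by the test suite."""
    def _postmsg(self, json_string):
        # Pretend the listener returned a completed work order.
        return {"jsonrpc": "2.0", "id": 11,
                "result": {"workOrderId": "0x1234"}}

request = {
    "jsonrpc": "2.0",
    "method": "WorkOrderGetResult",
    "id": 11,
    "params": {"workOrderId": "0x1234"},
}

response = submit_request_listener(_FakeUriClient(), request)
assert "result" in response
```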
#### File: tests/worker_tests/test_set_status.py
```python
import pytest
import logging
from src.libs.verification_libs \
import validate_response_code
from src.libs.pre_processing_libs \
import ResultStatus
from src.libs.avalon_test_base import AvalonBase
from conftest import env
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures("setup_teardown")
class TestClass():
test_obj = AvalonBase()
pytestmark = pytest.mark.setup_teardown_data(
test_obj, "WorkerSetStatus")
@pytest.mark.listener
@pytest.mark.sdk
@pytest.mark.proxy
def test_worker_set_status_success(self):
result_response = self.test_obj.run_test(
env['worker_setstatus_input_file'])
assert (
validate_response_code(
result_response,
env['expected_error_code']) is ResultStatus.SUCCESS.value)
@pytest.mark.listener
def test_worker_set_status_unknown_parameter(self):
result_response = self.test_obj.run_test(
env['worker_setstatus_input_file'])
assert (
validate_response_code(
result_response,
env['expected_error_code']) is ResultStatus.SUCCESS.value)
self.test_obj.teardown(env['worker_setstatus_input_file'])
@pytest.mark.listener
def test_worker_set_status_invalid_parameter(self):
result_response = self.test_obj.run_test(
env['worker_setstatus_input_file'])
assert (
validate_response_code(
result_response,
env['expected_error_code']) is ResultStatus.SUCCESS.value)
self.test_obj.teardown(env['worker_setstatus_input_file'])
@pytest.mark.listener
def test_worker_set_status_params_status_0(self):
result_response = self.test_obj.run_test(
env['worker_setstatus_input_file'])
assert (
validate_response_code(
result_response,
env['expected_error_code']) is ResultStatus.SUCCESS.value)
self.test_obj.teardown(env['worker_setstatus_input_file'])
@pytest.mark.listener
@pytest.mark.sdk
@pytest.mark.proxy
def test_worker_set_status_params_status_2(self):
result_response = self.test_obj.run_test(
env['worker_setstatus_input_file'])
assert (
validate_response_code(
result_response,
env['expected_error_code']) is ResultStatus.SUCCESS.value)
self.test_obj.teardown(env['worker_setstatus_input_file'])
@pytest.mark.listener
@pytest.mark.sdk
@pytest.mark.proxy
def test_worker_set_status_params_status_3(self):
result_response = self.test_obj.run_test(
env['worker_setstatus_input_file'])
assert (
validate_response_code(
result_response,
env['expected_error_code']) is ResultStatus.SUCCESS.value)
self.test_obj.teardown(env['worker_setstatus_input_file'])
@pytest.mark.listener
@pytest.mark.sdk
@pytest.mark.proxy
def test_worker_set_status_params_status_4(self):
result_response = self.test_obj.run_test(
env['worker_setstatus_input_file'])
assert (
validate_response_code(
result_response,
env['expected_error_code']) is ResultStatus.SUCCESS.value)
self.test_obj.teardown(env['worker_setstatus_input_file'])
@pytest.mark.listener
def test_worker_set_status_params_status_5(self):
result_response = self.test_obj.run_test(
env['worker_setstatus_input_file'])
assert (
validate_response_code(
result_response,
env['expected_error_code']) is ResultStatus.SUCCESS.value)
self.test_obj.teardown(env['worker_setstatus_input_file'])
``` |
{
"source": "jinensetpal/panoptic-reproducibility",
"score": 3
} |
#### File: src/models/activations.py
```python
import functools
import tensorflow as tf
def gelu(input_tensor, approximate=False):
"""Gaussian Error Linear Unit.
Reference:
Gaussian Error Linear Units (GELUs), <NAME>, <NAME>, arXiv 2016.
Args:
input_tensor: A tensor with an arbitrary shape.
approximate: A boolean, whether to enable approximation.
Returns:
The activated input tensor.
"""
return tf.keras.activations.gelu(input_tensor, approximate=approximate)
def hard_sigmoid(input_tensor):
"""Hard sigmoid activation function.
Args:
input_tensor: A tensor with an arbitrary shape.
Returns:
The activated input tensor.
"""
input_tensor = tf.convert_to_tensor(input_tensor)
return tf.nn.relu6(input_tensor + tf.constant(3.)) * 0.16667
def relu6(input_tensor):
"""Relu6 activation function.
Args:
input_tensor: A tensor with an arbitrary shape.
Returns:
The activated input tensor.
"""
input_tensor = tf.convert_to_tensor(input_tensor)
return tf.nn.relu6(input_tensor)
def swish(input_tensor):
"""Swish or SiLU activation function.
Args:
input_tensor: A tensor with an arbitrary shape.
Returns:
The activated input tensor.
"""
input_tensor = tf.convert_to_tensor(input_tensor)
return tf.nn.silu(input_tensor)
def hard_swish(input_tensor):
"""Hard Swish function.
Args:
input_tensor: A tensor with an arbitrary shape.
Returns:
The activated input tensor.
"""
input_tensor = tf.convert_to_tensor(input_tensor)
return input_tensor * tf.nn.relu6(
input_tensor + tf.constant(3.)) * (1. / 6.)
def identity(input_tensor):
"""Identity function.
Useful for helping in quantization.
Args:
input_tensor: A tensor with an arbitrary shape.
Returns:
The activated input tensor.
"""
input_tensor = tf.convert_to_tensor(input_tensor)
return tf.identity(input_tensor)
def get_activation(identifier):
"""Gets activation function via input identifier.
This function returns the specified customized activation function, if there
is any. Otherwise, tf.keras.activations.get is called.
Args:
identifier: A string, name of the activation function.
Returns:
The specified activation function.
"""
if isinstance(identifier, str):
name_to_fn = {
'gelu': functools.partial(gelu, approximate=False),
'approximated_gelu': functools.partial(gelu, approximate=True),
'silu': swish,
'swish': swish,
'hard_swish': hard_swish,
'relu6': relu6,
'hard_sigmoid': hard_sigmoid,
'identity': identity,
'none': identity,
}
identifier = str(identifier).lower()
if identifier in name_to_fn:
return name_to_fn[identifier]
return tf.keras.activations.get(identifier)
```
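A small usage sketch for `get_activation` (TensorFlow 2.x assumed; the tensor values are arbitrary):
```python
import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])

# Custom names are resolved from name_to_fn first.
hard_swish_fn = get_activation('hard_swish')
print(hard_swish_fn(x))   # elementwise x * relu6(x + 3) / 6

gelu_fn = get_activation('approximated_gelu')
print(gelu_fn(x))         # tanh-approximated GELU

# Unknown names fall back to tf.keras.activations.get.
relu_fn = get_activation('relu')
print(relu_fn(x))
```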
#### File: src/models/convolutions.py
```python
import functools
from typing import Optional
import tensorflow as tf
import utils
import activations
def _compute_padding_size(kernel_size, atrous_rate):
kernel_size_effective = kernel_size + (kernel_size - 1) * (atrous_rate - 1)
pad_total = kernel_size_effective - 1
pad_begin = pad_total // 2
pad_end = pad_total - pad_begin
if pad_begin != pad_end:
print('Convolution requires one more padding to the '
'bottom-right pixel. This may cause misalignment.')
return pad_begin, pad_end
class GlobalContext(tf.keras.layers.Layer):
"""Class for the global context modules in Switchable Atrous Convolution."""
def build(self, input_shape):
super().build(input_shape)
input_shape = tf.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
self.global_average_pooling = tf.keras.layers.GlobalAveragePooling2D()
self.convolution = tf.keras.layers.Conv2D(
input_channel, 1, strides=1, padding='same', name=self.name + '_conv',
kernel_initializer='zeros', bias_initializer='zeros')
def call(self, inputs, *args, **kwargs):
outputs = self.global_average_pooling(inputs)
outputs = tf.expand_dims(outputs, axis=1)
outputs = tf.expand_dims(outputs, axis=1)
outputs = self.convolution(outputs)
return inputs + outputs
def _get_input_channel(self, input_shape):
# Reference: tf.keras.layers.convolutional.Conv.
if input_shape.dims[-1].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[-1])
class SwitchableAtrousConvolution(tf.keras.layers.Conv2D):
"""Class for the Switchable Atrous Convolution."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._average_pool = tf.keras.layers.AveragePooling2D(
pool_size=(5, 5), strides=1, padding='same')
self._switch = tf.keras.layers.Conv2D(
1,
kernel_size=1,
strides=self.strides,
padding='same',
dilation_rate=1,
name='switch',
kernel_initializer='zeros',
bias_initializer='zeros')
def build(self, input_shape):
super().build(input_shape)
if self.padding == 'causal':
tf_padding = 'VALID'
elif isinstance(self.padding, str):
tf_padding = self.padding.upper()
else:
tf_padding = self.padding
large_dilation_rate = list(self.dilation_rate)
large_dilation_rate = [r * 3 for r in large_dilation_rate]
self._large_convolution_op = functools.partial(
tf.nn.convolution,
strides=list(self.strides),
padding=tf_padding,
dilations=large_dilation_rate,
data_format=self._tf_data_format,
name=self.__class__.__name__ + '_large')
def call(self, inputs):
# Reference: tf.keras.layers.convolutional.Conv.
input_shape = inputs.shape
switches = self._switch(self._average_pool(inputs))
if self._is_causal: # Apply causal padding to inputs for Conv1D.
inputs = tf.compat.v1.pad(inputs, self._compute_causal_padding(inputs))
outputs = self._convolution_op(inputs, self.kernel)
outputs_large = self._large_convolution_op(inputs, self.kernel)
outputs = switches * outputs_large + (1 - switches) * outputs
if self.use_bias:
outputs = tf.nn.bias_add(
outputs, self.bias, data_format=self._tf_data_format)
if not tf.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(input_shape)
outputs.set_shape(out_shape)
if self.activation is not None:
return self.activation(outputs)
return outputs
def squeeze_batch_dims(self, inp, op, inner_rank):
# Reference: tf.keras.utils.conv_utils.squeeze_batch_dims.
with tf.name_scope('squeeze_batch_dims'):
shape = inp.shape
inner_shape = shape[-inner_rank:]
if not inner_shape.is_fully_defined():
inner_shape = tf.compat.v1.shape(inp)[-inner_rank:]
batch_shape = shape[:-inner_rank]
if not batch_shape.is_fully_defined():
batch_shape = tf.compat.v1.shape(inp)[:-inner_rank]
if isinstance(inner_shape, tf.TensorShape):
inp_reshaped = tf.reshape(inp, [-1] + inner_shape.as_list())
else:
inp_reshaped = tf.reshape(
inp, tf.concat(([-1], inner_shape), axis=-1))
out_reshaped = op(inp_reshaped)
out_inner_shape = out_reshaped.shape[-inner_rank:]
if not out_inner_shape.is_fully_defined():
out_inner_shape = tf.compat.v1.shape(out_reshaped)[-inner_rank:]
out = tf.reshape(
out_reshaped, tf.concat((batch_shape, out_inner_shape), axis=-1))
out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
return out
class Conv2DSame(tf.keras.layers.Layer):
"""A wrapper class for a 2D convolution with 'same' padding.
In contrast to tf.keras.layers.Conv2D, this layer aligns the kernel with the
top-left corner rather than the bottom-right corner. Optionally, a batch
normalization and an activation can be added on top.
"""
def __init__(
self,
output_channels: int,
kernel_size: int,
name: str,
strides: int = 1,
atrous_rate: int = 1,
use_bias: bool = True,
use_bn: bool = False,
bn_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization,
bn_gamma_initializer: str = 'ones',
activation: Optional[str] = None,
use_switchable_atrous_conv: bool = False,
use_global_context_in_sac: bool = False,
conv_kernel_weight_decay: float = 0.0):
"""Initializes convolution with zero padding aligned to the top-left corner.
DeepLab aligns zero padding differently to tf.keras 'same' padding.
Considering a convolution with a 7x7 kernel, a stride of 2 and an even input
size, tf.keras 'same' padding will add 2 zero padding to the top-left and 3
zero padding to the bottom-right. However, for consistent feature alignment,
DeepLab requires an equal padding of 3 in all directions. This behavior is
consistent with e.g. the ResNet 'stem' block.
Args:
output_channels: An integer specifying the number of filters of the
convolution.
kernel_size: An integer specifying the size of the convolution kernel.
name: A string specifying the name of this layer.
strides: An optional integer or tuple of integers specifying the size of
the strides (default: 1).
atrous_rate: An optional integer or tuple of integers specifying the
atrous rate of the convolution (default: 1).
use_bias: An optional flag specifying whether bias should be added for the
convolution.
use_bn: An optional flag specifying whether batch normalization should be
added after the convolution (default: False).
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
bn_gamma_initializer: An initializer for the batch norm gamma weight.
activation: An optional flag specifying an activation function to be added
after the convolution.
use_switchable_atrous_conv: Boolean, whether the layer uses switchable
atrous convolution.
use_global_context_in_sac: Boolean, whether the switchable atrous
convolution (SAC) uses pre- and post-global context.
conv_kernel_weight_decay: A float, the weight decay for convolution
kernels.
Raises:
ValueError: If use_bias and use_bn in the convolution.
"""
super(Conv2DSame, self).__init__(name=name)
if use_bn and use_bias:
raise ValueError('Conv2DSame is using convolution bias with batch_norm.')
if use_global_context_in_sac:
self._pre_global_context = GlobalContext(name='pre_global_context')
convolution_op = tf.keras.layers.Conv2D
convolution_padding = 'same'
if strides == 1 or strides == (1, 1):
if use_switchable_atrous_conv:
convolution_op = SwitchableAtrousConvolution
else:
padding = _compute_padding_size(kernel_size, atrous_rate)
self._zeropad = tf.keras.layers.ZeroPadding2D(
padding=(padding, padding), name='zeropad')
convolution_padding = 'valid'
self._conv = convolution_op(
output_channels,
kernel_size,
strides=strides,
padding=convolution_padding,
use_bias=use_bias,
dilation_rate=atrous_rate,
name='conv',
kernel_initializer='he_normal',
kernel_regularizer=tf.keras.regularizers.l2(
conv_kernel_weight_decay))
if use_global_context_in_sac:
self._post_global_context = GlobalContext(name='post_global_context')
if use_bn:
self._batch_norm = bn_layer(axis=3, name='batch_norm',
gamma_initializer=bn_gamma_initializer)
self._activation_fn = None
if activation is not None:
self._activation_fn = activations.get_activation(activation)
self._use_global_context_in_sac = use_global_context_in_sac
self._strides = strides
self._use_bn = use_bn
def call(self, input_tensor, training=False):
"""Performs a forward pass.
Args:
input_tensor: An input tensor of type tf.Tensor with shape [batch, height,
width, channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
x = input_tensor
if self._use_global_context_in_sac:
x = self._pre_global_context(x)
if not (self._strides == 1 or self._strides == (1, 1)):
x = self._zeropad(x)
x = self._conv(x)
if self._use_global_context_in_sac:
x = self._post_global_context(x)
if self._use_bn:
x = self._batch_norm(x, training=training)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
class DepthwiseConv2DSame(tf.keras.layers.Layer):
"""A wrapper class for a 2D depthwise convolution.
  In contrast to convolutions in tf.keras.layers.DepthwiseConv2D, this layer
aligns the kernel with the top-left corner rather than the bottom-right
corner. Optionally, a batch normalization and an activation can be added.
"""
def __init__(self,
kernel_size: int,
name: str,
strides: int = 1,
atrous_rate: int = 1,
use_bias: bool = True,
use_bn: bool = False,
bn_layer=tf.keras.layers.BatchNormalization,
activation: Optional[str] = None):
"""Initializes a 2D depthwise convolution.
Args:
kernel_size: An integer specifying the size of the convolution kernel.
name: A string specifying the name of this layer.
strides: An optional integer or tuple of integers specifying the size of
the strides (default: 1).
atrous_rate: An optional integer or tuple of integers specifying the
atrous rate of the convolution (default: 1).
use_bias: An optional flag specifying whether bias should be added for the
convolution.
use_bn: An optional flag specifying whether batch normalization should be
added after the convolution (default: False).
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
activation: An optional flag specifying an activation function to be added
after the convolution.
Raises:
ValueError: If use_bias and use_bn in the convolution.
"""
super(DepthwiseConv2DSame, self).__init__(name=name)
if use_bn and use_bias:
raise ValueError(
          'DepthwiseConv2DSame is using convolution bias with batch_norm.')
if strides == 1 or strides == (1, 1):
convolution_padding = 'same'
else:
padding = _compute_padding_size(kernel_size, atrous_rate)
self._zeropad = tf.keras.layers.ZeroPadding2D(
padding=(padding, padding), name='zeropad')
convolution_padding = 'valid'
self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=strides,
padding=convolution_padding,
use_bias=use_bias,
dilation_rate=atrous_rate,
name='depthwise_conv')
if use_bn:
self._batch_norm = bn_layer(axis=3, name='batch_norm')
self._activation_fn = None
if activation is not None:
self._activation_fn = activations.get_activation(activation)
self._strides = strides
self._use_bn = use_bn
def call(self, input_tensor, training=False):
"""Performs a forward pass.
Args:
input_tensor: An input tensor of type tf.Tensor with shape [batch, height,
width, channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
x = input_tensor
if not (self._strides == 1 or self._strides == (1, 1)):
x = self._zeropad(x)
x = self._depthwise_conv(x)
if self._use_bn:
x = self._batch_norm(x, training=training)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
class SeparableConv2DSame(tf.keras.layers.Layer):
"""A wrapper class for a 2D separable convolution.
  In contrast to convolutions in tf.keras.layers.SeparableConv2D, this layer
aligns the kernel with the top-left corner rather than the bottom-right
corner. Optionally, a batch normalization and an activation can be added.
"""
def __init__(
self,
output_channels: int,
kernel_size: int,
name: str,
strides: int = 1,
atrous_rate: int = 1,
use_bias: bool = True,
use_bn: bool = False,
bn_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization,
activation: Optional[str] = None):
"""Initializes a 2D separable convolution.
Args:
output_channels: An integer specifying the number of filters of the
convolution output.
kernel_size: An integer specifying the size of the convolution kernel.
name: A string specifying the name of this layer.
strides: An optional integer or tuple of integers specifying the size of
the strides (default: 1).
atrous_rate: An optional integer or tuple of integers specifying the
atrous rate of the convolution (default: 1).
use_bias: An optional flag specifying whether bias should be added for the
convolution.
use_bn: An optional flag specifying whether batch normalization should be
added after the convolution (default: False).
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
activation: An optional flag specifying an activation function to be added
after the convolution.
Raises:
ValueError: If use_bias and use_bn in the convolution.
"""
super(SeparableConv2DSame, self).__init__(name=name)
if use_bn and use_bias:
raise ValueError(
'SeparableConv2DSame is using convolution bias with batch_norm.')
self._depthwise = DepthwiseConv2DSame(
kernel_size=kernel_size,
name='depthwise',
strides=strides,
atrous_rate=atrous_rate,
use_bias=use_bias,
use_bn=use_bn,
bn_layer=bn_layer,
activation=activation)
self._pointwise = Conv2DSame(
output_channels=output_channels,
kernel_size=1,
name='pointwise',
strides=1,
atrous_rate=1,
use_bias=use_bias,
use_bn=use_bn,
bn_layer=bn_layer,
activation=activation)
def call(self, input_tensor, training=False):
"""Performs a forward pass.
Args:
input_tensor: An input tensor of type tf.Tensor with shape [batch, height,
width, channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
x = self._depthwise(input_tensor, training=training)
return self._pointwise(x, training=training)
class StackedConv2DSame(tf.keras.layers.Layer):
"""Stacked Conv2DSame or SeparableConv2DSame.
This class sequentially stacks a given number of Conv2DSame layers or
SeparableConv2DSame layers.
"""
def __init__(
self,
num_layers: int,
conv_type: str,
output_channels: int,
kernel_size: int,
name: str,
strides: int = 1,
atrous_rate: int = 1,
use_bias: bool = True,
use_bn: bool = False,
bn_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization,
activation: Optional[str] = None):
"""Initializes a stack of convolutions.
Args:
num_layers: The number of convolutions to create.
conv_type: A string specifying the convolution type used in each block.
Must be one of 'standard_conv' or 'depthwise_separable_conv'.
output_channels: An integer specifying the number of filters of the
convolution output.
kernel_size: An integer specifying the size of the convolution kernel.
name: A string specifying the name of this layer.
strides: An optional integer or tuple of integers specifying the size of
the strides (default: 1).
atrous_rate: An optional integer or tuple of integers specifying the
atrous rate of the convolution (default: 1).
use_bias: An optional flag specifying whether bias should be added for the
convolution.
use_bn: An optional flag specifying whether batch normalization should be
added after the convolution (default: False).
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
activation: An optional flag specifying an activation function to be added
after the convolution.
Raises:
ValueError: An error occurs when conv_type is neither 'standard_conv'
nor 'depthwise_separable_conv'.
"""
super(StackedConv2DSame, self).__init__(name=name)
if conv_type == 'standard_conv':
convolution_op = Conv2DSame
elif conv_type == 'depthwise_separable_conv':
convolution_op = SeparableConv2DSame
else:
raise ValueError('Convolution %s not supported.' % conv_type)
for index in range(num_layers):
current_name = utils.get_conv_bn_act_current_name(index, use_bn,
activation)
utils.safe_setattr(self, current_name, convolution_op(
output_channels=output_channels,
kernel_size=kernel_size,
name=utils.get_layer_name(current_name),
strides=strides,
atrous_rate=atrous_rate,
use_bias=use_bias,
use_bn=use_bn,
bn_layer=bn_layer,
activation=activation))
self._num_layers = num_layers
self._use_bn = use_bn
self._activation = activation
def call(self, input_tensor, training=False):
"""Performs a forward pass.
Args:
input_tensor: An input tensor of type tf.Tensor with shape [batch, height,
width, channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
x = input_tensor
for index in range(self._num_layers):
current_name = utils.get_conv_bn_act_current_name(index, self._use_bn,
self._activation)
x = getattr(self, current_name)(x, training=training)
return x
class Conv1D(tf.keras.layers.Layer):
"""A wrapper class for a 1D convolution with batch norm and activation.
Conv1D creates a convolution kernel that is convolved with the layer input
over a single spatial (or temporal) dimension to produce a tensor of outputs.
The input should always be 3D with shape [batch, length, channel], so
accordingly, the optional batch norm is done on axis=2.
In DeepLab, we use Conv1D only with kernel_size = 1 for dual path transformer
layers in MaX-DeepLab [1] architectures.
Reference:
[1] MaX-DeepLab: End-to-End Panoptic Segmentation with Mask Transformers,
CVPR 2021.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
"""
def __init__(
self,
output_channels: int,
name: str,
use_bias: bool = True,
use_bn: bool = False,
bn_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization,
bn_gamma_initializer: str = 'ones',
activation: Optional[str] = None,
conv_kernel_weight_decay: float = 0.0,
kernel_initializer='he_normal',
kernel_size: int = 1,
padding: str = 'valid'):
"""Initializes a Conv1D.
Args:
output_channels: An integer specifying the number of filters of the
convolution.
name: A string specifying the name of this layer.
use_bias: An optional flag specifying whether bias should be added for the
convolution.
use_bn: An optional flag specifying whether batch normalization should be
added after the convolution (default: False).
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
bn_gamma_initializer: An initializer for the batch norm gamma weight.
activation: An optional flag specifying an activation function to be added
after the convolution.
conv_kernel_weight_decay: A float, the weight decay for convolution
kernels.
kernel_initializer: An initializer for the convolution kernel.
kernel_size: An integer specifying the size of the convolution kernel.
padding: An optional string specifying the padding to use. Must be either
'same' or 'valid' (default: 'valid').
Raises:
ValueError: If use_bias and use_bn in the convolution.
"""
super(Conv1D, self).__init__(name=name)
if use_bn and use_bias:
      raise ValueError('Conv1D is using convolution bias with batch_norm.')
self._conv = tf.keras.layers.Conv1D(
output_channels,
kernel_size=kernel_size,
strides=1,
padding=padding,
use_bias=use_bias,
name='conv',
kernel_initializer=kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(
conv_kernel_weight_decay))
self._batch_norm = None
if use_bn:
# Batch norm uses axis=2 because the input is 3D with channel being the
# last dimension.
self._batch_norm = bn_layer(axis=2, name='batch_norm',
gamma_initializer=bn_gamma_initializer)
self._activation_fn = None
if activation is not None:
self._activation_fn = activations.get_activation(activation)
def call(self, input_tensor, training=False):
"""Performs a forward pass.
Args:
input_tensor: An input tensor of type tf.Tensor with shape [batch, length,
channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
x = self._conv(input_tensor)
if self._batch_norm is not None:
x = self._batch_norm(x, training=training)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
``` |
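A minimal shape-check sketch for `Conv2DSame` (TensorFlow 2.x assumed; the layer name and tensor sizes are arbitrary):
```python
import tensorflow as tf

# With kernel_size=7 and atrous_rate=1, _compute_padding_size returns (3, 3),
# so the strided path pads 3 pixels on every side before the 'valid' conv.
layer = Conv2DSame(
    output_channels=64,
    kernel_size=7,
    name='stem_conv',
    strides=2,
    use_bias=False,
    use_bn=True,
    activation='relu')

features = tf.zeros([2, 65, 65, 3])
outputs = layer(features, training=False)
print(outputs.shape)  # (2, 33, 33, 64)
```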
{
"source": "jinesh90/CMPR425-DSAA",
"score": 4
} |
#### File: CMPR425-DSAA/Hash/hash.py
```python
from slist import slist
class Hash:
"""
hash implementation for python
"""
def __init__(self, size):
"""
initialized with size of dict
:param size:
"""
self._table_size = size
self._size = 0
self._hash = []
        for _ in range(self._table_size):
            self._hash.append(slist())
def insert(self, key):
"""
insert key to hash
:param key:
:return:
"""
x = self._hash_function(key)
self._hash[x].append(key)
self._increment_size()
def find(self, key):
"""
find the key from hash
:param key:
:return:
"""
x = self._hash_function(key)
f = self._hash[x].find(key)
return f
def delete(self, key):
"""
delete the key from hash
:param key:
:return:
"""
x = self._hash_function(key)
        self._hash[x].delete(key)
self._decrement_size()
def _hash_function(self, key):
"""
simple hash function for
:param key:
:return:
"""
return key % (self._table_size)
def _increment_size(self):
self._size += 1
def _decrement_size(self):
self._size -= 1
def __len__(self):
return self._size
if __name__ == '__main__':
h = Hash(10)
h.insert(23)
h.insert(34)
h.insert(43)
print(h.find(43))
h.delete(43)
print(h.find(43))
print(len(h))
```
#### File: CMPR425-DSAA/Leetcode/letter_combination_of_phone_number.py
```python
def letter_of_combination(digits):
"""
brute force approach, because we have constrain 0<=digit<=4, we can use this method.
:param digits:
:return:
"""
phone_map = {
"2": ["a","b","c"],
"3": ["d","e","f"],
"4":["g","h","i"],
"5": ["j","k","l"],
"6": ["m","n","o"],
"7":["p","q","r","s"],
"8":["t","u","v"],
"9":["w","x","y","z"]
}
combinations = []
digit_len = len(digits)
if digit_len == 0:
return combinations
elif digit_len == 1:
letter_map = phone_map.get(digits)
return letter_map
elif digit_len == 2:
letter_map_1 = phone_map.get(digits[0])
letter_map_2 = phone_map.get(digits[1])
combinations = ["{}{}".format(x, y) for x in letter_map_1 for y in letter_map_2]
return combinations
elif digit_len == 3:
letter_map_1 = phone_map.get(digits[0])
letter_map_2 = phone_map.get(digits[1])
letter_map_3 = phone_map.get(digits[2])
combinations = ["{}{}{}".format(x, y, z) for x in letter_map_1 for y in letter_map_2 for z in letter_map_3]
return combinations
else:
letter_map_1 = phone_map.get(digits[0])
letter_map_2 = phone_map.get(digits[1])
letter_map_3 = phone_map.get(digits[2])
letter_map_4 = phone_map.get(digits[3])
combinations = ["{}{}{}{}".format(x, y, z, q) for x in letter_map_1 for y in letter_map_2 for z in letter_map_3 for q in letter_map_4]
return combinations
def letter_of_combination_backtrace(digits):
pass
```
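The backtracking variant above is left as a `pass` stub; a sketch of the standard backtracking approach (written here as a separate, hypothetical function, not the author's implementation) could look like this:
```python
def letter_of_combination_backtrack(digits):
    """Backtracking version: works for any number of digits, not just 0-4."""
    phone_map = {
        "2": "abc", "3": "def", "4": "ghi", "5": "jkl",
        "6": "mno", "7": "pqrs", "8": "tuv", "9": "wxyz",
    }
    combinations = []
    if not digits:
        return combinations

    def backtrack(index, path):
        # A full combination has exactly one letter per input digit.
        if index == len(digits):
            combinations.append("".join(path))
            return
        for letter in phone_map[digits[index]]:
            path.append(letter)
            backtrack(index + 1, path)
            path.pop()

    backtrack(0, [])
    return combinations

print(letter_of_combination_backtrack("23"))
# ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']
```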
#### File: CMPR425-DSAA/Leetcode/longest_substring.py
```python
def find_longest_brute_force(s):
def check(start, end):
chars = [0] * 128
for i in range(start, end + 1):
c = s[i]
chars[ord(c)] += 1
if chars[ord(c)] > 1:
return False
return True
n = len(s)
res = 0
for i in range(n):
for j in range(i, n):
if check(i, j):
res = max(res, j - i + 1)
return res
s = "abcabcbbafghai"
print(find_longest_brute_force(s))
```
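The brute force above re-checks every candidate window; for comparison, a sliding-window sketch of the same problem (a standard technique, not part of the original file) runs in O(n):
```python
def find_longest_sliding_window(s):
    """Length of the longest substring without repeating characters, in O(n)."""
    last_seen = {}   # char -> most recent index
    start = 0        # left edge of the current window
    best = 0
    for i, c in enumerate(s):
        if c in last_seen and last_seen[c] >= start:
            # c repeats inside the window: move the left edge past it.
            start = last_seen[c] + 1
        last_seen[c] = i
        best = max(best, i - start + 1)
    return best

print(find_longest_sliding_window("abcabcbbafghai"))  # 5 ("bafgh")
```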
#### File: CMPR425-DSAA/Recursion/find_max.py
```python
def find_max(array, i):
if i == len(array) - 1:
return array[i]
else:
        tmp_max = array[i]
        i = i + 1
        max_rest = find_max(array, i)
        if tmp_max > max_rest:
            return tmp_max
        else:
            return max_rest
        # return max(tmp_max, max_rest)
#a = [x for x in range(1000)]
a = [2, 1, 3, 10,60, 54, -1, 2, 5, 0, 34, 8]
print (find_max(a, 0))
```
#### File: CMPR425-DSAA/Recursion/find_min.py
```python
def find_min(array, i):
if i == len(array) - 1:
return array[i]
else:
tmp_min = array[i]
i = i + 1
min_frm = find_min(array, i)
return min(tmp_min, min_frm)
a = [2, 1, 3, 10, 53, 23, -1, 2, 5, 0, 34, 8]
#a = [x for x in range(1000)]
print (find_min(a, 0))
```
#### File: CMPR425-DSAA/Recursion/tower_of_hanoi.py
```python
def move(disks, source, auxiliary, target):
if disks > 0:
# move `N-1` discs from source to auxiliary using the target
# as an intermediate pole
move(disks - 1, source, target, auxiliary)
print("Move disk {} from {} to {}".format(disks, source, target))
# move `N-1` discs from auxiliary to target using the source
# as an intermediate pole
move(disks - 1, auxiliary, source, target)
# Tower of Hanoi Problem
if __name__ == '__main__':
N = 3
move(N, 1, 2, 3)
``` |
{
"source": "jineshparakh/Blogify",
"score": 2
} |
#### File: app/posts/routes.py
```python
from app.tags.forms import TagForm
from flask import Blueprint, render_template, url_for, flash, redirect, request, abort
from flask_login import current_user, login_required
from werkzeug.utils import html
from app import db
from app.models import Post, Tag
from app.posts.forms import PostForm
import markdown
posts=Blueprint('posts', __name__)
@posts.route('/post/new', methods=['GET', 'POST'])
@login_required
def new_post():
form=PostForm()
tagForm = TagForm()
tags=Tag.query.all()
form.tags.choices = [(tag.id, tag.value) for tag in tags]
if form.validate_on_submit():
post=Post(title=form.title.data, content=repr(form.content.data), author=current_user, tags=Tag.query.filter(Tag.id.in_(form.tags.data)).all())
db.session.add(post)
db.session.commit()
flash(f'Your Post has been created!!', category='success')
return redirect(url_for('main.home'))
return render_template('create_post.html', title='New Post',legend='New Post', form=form, tags=tags, tagForm=tagForm)
@posts.route('/post/<int:post_id>')
def post(post_id):
post=Post.query.get_or_404(post_id)
return render_template('post.html', title=post.title, post=post)
@posts.route('/post/<int:post_id>/update', methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post=Post.query.get_or_404(post_id)
if post.author!=current_user:
abort(403)
form=PostForm()
tagForm = TagForm()
tags=Tag.query.all()
form.tags.choices = [(tag.id, tag.value) for tag in tags]
if form.validate_on_submit():
post.title=form.title.data
post.content=repr(form.content.data)
post.tags = Tag.query.filter(Tag.id.in_(form.tags.data)).all()
db.session.commit()
flash(f'Your Post has been updated!!', category='success')
return redirect(url_for('posts.post', post_id=post.id))
elif request.method=='GET':
form.title.data=post.title
form.content.data=post.content
selectedTags=post.tags
return render_template('create_post.html', title='Update Post', legend='Update Post', form=form, tags=tags, tagForm=tagForm, selectedTags=selectedTags)
return render_template('create_post.html', title='Update Post', legend='Update Post', form=form, tags=tags, tagForm=tagForm)
@posts.route('/post/<int:post_id>/delete', methods=['POST'])
@login_required
def delete_post(post_id):
post=Post.query.get_or_404(post_id)
if post.author!=current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash(f'Your Post has been deleted!!', category='success')
return redirect(url_for('main.home'))
```
#### File: app/users/routes.py
```python
from flask import Blueprint, render_template, url_for, flash, request, redirect
from flask_login import login_user, current_user, logout_user, login_required
from app import db, bcrypt
from app.users.forms import RegistrationForm, LoginForm, UpdateAccountForm, RequestResetForm, ResetPasswordForm
from app.users.utils import save_picture, send_reset_email
from app.models import User, Post
users=Blueprint('users', __name__)
@users.route('/register', methods=['POST','GET'])
def register():
if current_user.is_authenticated:
return redirect(url_for('main.home'))
form=RegistrationForm()
if form.validate_on_submit():
hashed_password=bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user= User(username=form.username.data, email=form.email.data, password=hashed_password)
try:
db.session.add(user)
db.session.commit()
flash(f'Account Created for {form.username.data}!, you can now login :)', category='success')
return redirect(url_for('users.login'))
except:
flash(f'The user is already registered!! Try to login!!', category='danger')
return render_template('register.html', title='Register', form=form)
@users.route('/login', methods=['POST','GET'])
def login():
if current_user.is_authenticated:
return redirect(url_for('main.home'))
form=LoginForm()
if form.validate_on_submit():
user=User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
next_page=request.args.get('next')
flash(f'Hey {user.username}! Good to see you back ;)', category='success')
if next_page:
return redirect(next_page)
else:
return redirect(url_for('main.home'))
else:
flash(f'Login Unsuccessful. Please check the email and/or Password', 'danger')
return render_template('login.html', title='Login', form=form)
@users.route('/logout')
def logout():
logout_user()
return redirect(url_for('main.home'))
@users.route('/account', methods=['POST','GET'])
@login_required
def account():
form=UpdateAccountForm()
if form.validate_on_submit():
if form.picture.data:
picture_file=save_picture(form.picture.data)
current_user.image_file=picture_file
current_user.username=form.username.data
current_user.email=form.email.data
db.session.commit()
flash(f'Account Details Updated!!', category='success')
elif request.method=='GET':
form.username.data=current_user.username
form.email.data=current_user.email
image_file=url_for('static', filename='profile_pics/'+current_user.image_file)
return render_template('account.html',title='Account', image_file=image_file, form=form)
@users.route('/user/<string:username>')
def user_posts(username):
page=request.args.get('page', default=1, type=int)
user=User.query.filter_by(username=username).first_or_404()
posts=Post.query.filter_by(author=user).order_by(Post.created_at.desc()).paginate(page=page, per_page=3)
return render_template('user_posts.html', posts=posts,user=user)
@users.route('/reset_password',methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('main.home'))
form=RequestResetForm()
if form.validate_on_submit():
user=User.query.filter_by(email=form.email.data).first()
send_reset_email(user)
flash(f'Check Your Registered Email for further instructions', 'info')
return redirect(url_for('users.login'))
return render_template('reset_request.html',title='Reset Password', form=form)
@users.route('/reset_password/<token>',methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('main.home'))
user=User.verify_reset_token(token)
if user is None:
flash(f'That is an invalid or expired token', 'warning')
return redirect(url_for('users.reset_request'))
form=ResetPasswordForm()
if form.validate_on_submit():
hashed_password=bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user.password=hashed_password
db.session.commit()
flash(f'Password Updated! You can now login :)', category='success')
return redirect(url_for('users.login'))
return render_template('reset_token.html',title='Reset Password',form=form)
```
#### File: app/users/utils.py
```python
import os
import secrets
from PIL import Image
from flask import url_for
from flask_mail import Message
from app import app, mail
def save_picture(form_picture):
random_hex=secrets.token_hex(8)
_,f_ext=os.path.splitext(form_picture.filename)
picture_filename=random_hex+f_ext
picture_path=os.path.join(app.root_path, 'static/profile_pics', picture_filename)
output_size=(250,250)
i=Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_filename
def send_reset_email(user):
token=user.get_reset_token()
msg=Message('Password Reset Request',
sender='<EMAIL>',
recipients=[user.email])
msg.body=f''' To reset your password visit:
{url_for('users.reset_token', token=token, _external=True)}
If you did not make this request, then simply ignore this email and no changes will be made
'''
mail.send(msg)
``` |
{
"source": "jinevraAI/jinevra",
"score": 2
} |
#### File: jinevra/jinevra/__init__.py
```python
def about():
return("jinevra democratizes AI-based language analysis. Learn more at jinevra.ai")
def confirm_import():
return("You've imported the Jinevra library!")
``` |
{
"source": "jinfagang/3d_detection_kit",
"score": 2
} |
#### File: jinfagang/3d_detection_kit/show_pc_open3d.py
```python
import os
import numpy as np
from open3d import *
import cv2
# a = 'training/velodyne/000900.bin'
# a = '/media/jintain/sg/permanent/datasets/KITTI/kitti_object_vis/data/object/training/velodyne/000000.bin'
a = 'data/testing/valodyne/000003.bin'
b = np.fromfile(a, dtype=np.float32)
print(b)
print(b.shape)
points = np.random.rand(10000, 3)
point_cloud = PointCloud()
point_cloud.points = Vector3dVector(points)
def custom_draw_geometry_with_key_callback(pcd):
def change_background_to_black(vis):
opt = vis.get_render_option()
opt.background_color = np.asarray([0, 0, 0])
return False
def load_render_option(vis):
vis.get_render_option().load_from_json(
"../../TestData/renderoption.json")
return False
def capture_depth(vis):
depth = vis.capture_depth_float_buffer()
plt.imshow(np.asarray(depth))
plt.show()
return False
def capture_image(vis):
image = vis.capture_screen_float_buffer()
cv2.imshow('snap', np.asarray(image))
# cv2.im
cv2.waitKey(0)
return False
key_to_callback = {}
key_to_callback[ord("K")] = change_background_to_black
key_to_callback[ord("R")] = load_render_option
key_to_callback[ord(",")] = capture_depth
key_to_callback[ord(".")] = capture_image
draw_geometries_with_key_callbacks([pcd], key_to_callback)
# custom_draw_geometry_with_key_callback(point_cloud)
b = np.reshape(b, (-1, 4))
print(b)
b = b[:, :3]
print(b.shape)
# keep only x, y, z; the 4th column of a KITTI velodyne scan is the reflectance/intensity
point_cloud.points = Vector3dVector(b)
custom_draw_geometry_with_key_callback(point_cloud)
``` |
{
"source": "jinfagang/alfred",
"score": 2
} |
#### File: alfred/alfred/tests.py
```python
from utils.mana import welcome
from utils.log import logger as logging
from vis.image.det import visualize_det_cv2
import cv2
import numpy as np
from vis.image.get_dataset_label_map import coco_label_map_list
from vis.image.common import draw_rect_with_style
import torch
from dl.torch.common import print_tensor
from varname import varname
def a_func(num):
print(varname() + ': ' + str(num))
def clothes(func):
def wear():
print('Buy clothes!{}'.format(func.__name__))
return func()
return wear
@clothes
def body():
print('The body feels cold!')
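# Because of the @clothes decorator, the name body now refers to wear(): calling body()
# first prints the "Buy clothes!" line and then invokes the original body() function.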
if __name__ == '__main__':
v = a_func(1098)
# welcome('')
# logging.info('hi hiu')
# logging.error('ops')
# a = cv2.imread('/home/jintian/Pictures/1.jpeg')
# dets = [
# [1, 0.9, 4, 124, 333, 256],
# [2, 0.7, 155, 336, 367, 485],
# ]
# dets = np.array(dets)
# print(type(a))
# draw_rect_with_style(a, (78, 478), (478, 223), (0, 255, 255), style='dashed')
# visualize_det_cv2(a, dets, coco_label_map_list, is_show=True)
aaa = torch.randn([1, 23, 45])
print_tensor(aaa)
```
#### File: vis/image/pose.py
```python
from .pose_dataset_info import DatasetInfo
from .pose_dataset_info import get_dataset_info_by_name
import cv2
import numpy as np
import math
def vis_pose_result(
img,
pose_result,
radius=4,
thickness=1,
kpt_score_thr=0.3,
dataset="TopDownCocoDataset",
show=False,
out_file=None,
):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str | np.ndarray): Image filename or loaded image.
pose_result (list[dict]): The results to draw over `img`, [N, 17, 3] for coco body
radius (int): Radius of circles.
thickness (int): Thickness of lines.
kpt_score_thr (float): The threshold to visualize the keypoints.
skeleton (list[tuple()]): Default None.
show (bool): Whether to show the image. Default True.
out_file (str|None): The filename of the output visualization image.
"""
# get dataset info
dataset_info = get_dataset_info_by_name(dataset)
skeleton = dataset_info.skeleton
pose_link_color = dataset_info.pose_link_color
pose_kpt_color = dataset_info.pose_kpt_color
img = imshow_keypoints(
img,
pose_result,
skeleton=skeleton,
kpt_score_thr=kpt_score_thr,
pose_kpt_color=pose_kpt_color,
pose_link_color=pose_link_color,
radius=radius,
thickness=thickness,
show_keypoint_weight=False,
)
if show:
cv2.imshow("pose result", img)
cv2.waitKey(0)
if out_file is not None:
cv2.imwrite(out_file, img)
return img
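# Minimal usage sketch (all inputs below are assumed, not part of this repo):
# img = cv2.imread('person.jpg')                # hypothetical test image
# kpts = [np.zeros((17, 3), dtype=np.float32)]  # one person, COCO keypoints as (x, y, score)
# vis_pose_result(img, kpts, dataset='TopDownCocoDataset', out_file='pose_vis.jpg')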
def imshow_keypoints(
img,
pose_result,
skeleton=None,
kpt_score_thr=0.3,
pose_kpt_color=None,
pose_link_color=None,
radius=4,
thickness=1,
show_keypoint_weight=False,
):
"""Draw keypoints and links on an image.
Args:
img (str or Tensor): The image to draw poses on. If an image array
is given, it will be modified in-place.
pose_result (list[kpts]): The poses to draw. Each element kpts is
a set of K keypoints as a Kx3 numpy.ndarray, where each
keypoint is represented as x, y, score.
kpt_score_thr (float, optional): Minimum score of keypoints
to be shown. Default: 0.3.
pose_kpt_color (np.array[Nx3]`): Color of N keypoints. If None,
the keypoint will not be drawn.
pose_link_color (np.array[Mx3]): Color of M links. If None, the
links will not be drawn.
thickness (int): Thickness of lines.
"""
img_h, img_w, _ = img.shape
for kpts in pose_result:
kpts = np.array(kpts, copy=False)
# draw each point on image
if pose_kpt_color is not None:
assert len(pose_kpt_color) == len(kpts)
for kid, kpt in enumerate(kpts):
x_coord, y_coord, kpt_score = int(kpt[0]), int(kpt[1]), kpt[2]
if kpt_score > kpt_score_thr:
if show_keypoint_weight:
img_copy = img.copy()
r, g, b = pose_kpt_color[kid]
cv2.circle(
img_copy,
(int(x_coord), int(y_coord)),
radius,
(int(r), int(g), int(b)),
-1,
cv2.LINE_AA,
)
transparency = max(0, min(1, kpt_score))
cv2.addWeighted(
img_copy, transparency, img, 1 - transparency, 0, dst=img
)
else:
r, g, b = pose_kpt_color[kid]
cv2.circle(
img,
(int(x_coord), int(y_coord)),
radius,
(int(r), int(g), int(b)),
-1,
cv2.LINE_AA,
)
# draw links
if skeleton is not None and pose_link_color is not None:
assert len(pose_link_color) == len(skeleton)
for sk_id, sk in enumerate(skeleton):
pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1]))
pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1]))
if (
pos1[0] > 0
and pos1[0] < img_w
and pos1[1] > 0
and pos1[1] < img_h
and pos2[0] > 0
and pos2[0] < img_w
and pos2[1] > 0
and pos2[1] < img_h
and kpts[sk[0], 2] > kpt_score_thr
and kpts[sk[1], 2] > kpt_score_thr
):
r, g, b = pose_link_color[sk_id]
if show_keypoint_weight:
img_copy = img.copy()
X = (pos1[0], pos2[0])
Y = (pos1[1], pos2[1])
mX = np.mean(X)
mY = np.mean(Y)
length = ((Y[0] - Y[1]) ** 2 + (X[0] - X[1]) ** 2) ** 0.5
angle = math.degrees(math.atan2(Y[0] - Y[1], X[0] - X[1]))
stickwidth = 2
polygon = cv2.ellipse2Poly(
(int(mX), int(mY)),
(int(length / 2), int(stickwidth)),
int(angle),
0,
360,
1,
)
cv2.fillConvexPoly(img_copy, polygon, (int(r), int(g), int(b)))
transparency = max(
0, min(1, 0.5 * (kpts[sk[0], 2] + kpts[sk[1], 2]))
)
cv2.addWeighted(
img_copy, transparency, img, 1 - transparency, 0, dst=img
)
else:
cv2.line(
img,
pos1,
pos2,
(int(r), int(g), int(b)),
thickness=thickness,
lineType=cv2.LINE_AA,
)
return img
``` |
{
"source": "jinfagang/instance_seg_tf",
"score": 2
} |
#### File: jinfagang/instance_seg_tf/inference.py
```python
import os
import os.path
import argparse
from glob import glob
import numpy as np
import cv2
import tensorflow as tf
slim = tf.contrib.slim
from enet import ENet, ENet_arg_scope
from clustering import cluster, get_instance_masks, save_instance_masks
import time
import glog as log
def rebuild_graph(sess, checkpoint_dir, input_image, batch_size, feature_dim):
checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
num_initial_blocks = 1
skip_connections = False
stage_two_repeat = 2
with slim.arg_scope(ENet_arg_scope()):
_, _ = ENet(input_image,
num_classes=12,
batch_size=batch_size,
is_training=True,
reuse=None,
num_initial_blocks=num_initial_blocks,
stage_two_repeat=stage_two_repeat,
skip_connections=skip_connections)
graph = tf.get_default_graph()
last_prelu = graph.get_tensor_by_name('ENet/bottleneck5_1_last_prelu:0')
logits = slim.conv2d_transpose(last_prelu, feature_dim, [2, 2], stride=2,
scope='Instance/transfer_layer/conv2d_transpose')
variables_to_restore = slim.get_variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
saver.restore(sess, checkpoint)
return logits
def save_image_with_features_as_color(pred):
p_min = np.min(pred)
p_max = np.max(pred)
pred = (pred - p_min) * 255 / (p_max - p_min)
pred = pred.astype(np.uint8)
output_file_name = os.path.join(output_dir, 'color_{}.png'.format(str(i).zfill(4)))
cv2.imwrite(output_file_name, np.squeeze(pred))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--modeldir', default='saved_model/lane', help="Directory of trained model")
parser.add_argument('-i', '--indir', default='data/test_images',
help='Input image directory (jpg format)')
parser.add_argument('-o', '--outdir', default='log',
help='Output directory for inference images')
args = parser.parse_args()
data_dir = args.indir
output_dir = args.outdir
checkpoint_dir = args.modeldir
log.info('Load checkpoints from {}'.format(checkpoint_dir))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
image_paths = glob(os.path.join(data_dir, '*.jpg'))
image_paths += glob(os.path.join(data_dir, '*.png'))
image_paths.sort()
num_images = len(image_paths)
print('Got num images: {}'.format(num_images))
image_shape = (512, 512)
batch_size = 1
feature_dim = 3
### Limit GPU memory usage due to occasional crashes
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.5
with tf.Session(config=config) as sess:
input_image = tf.placeholder(tf.float32, shape=(None, image_shape[1], image_shape[0], 3))
logits = rebuild_graph(sess, checkpoint_dir, input_image, batch_size, feature_dim)
inference_time = 0
cluster_time = 0
for i, path in enumerate(image_paths):
image = cv2.resize(cv2.imread(path), image_shape, interpolation=cv2.INTER_LINEAR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_original = cv2.imread(path)
image = np.expand_dims(image, axis=0)
tic = time.time()
prediction = sess.run(logits, feed_dict={input_image: image})
pred_time = time.time() - tic
print('Inference time', pred_time)
inference_time += pred_time
pred_color = np.squeeze(prediction.copy())
print('Save prediction', i)
# save_image_with_features_as_color(pred_color)
pred_cluster = prediction.copy()
tic = time.time()
instance_mask = get_instance_masks(pred_cluster, bandwidth=1.)[0]
# save_instance_masks(prediction, output_dir, bandwidth=1., count=i)
print(instance_mask.shape)
output_file_name = os.path.join(output_dir, 'cluster_{}.png'.format(str(i).zfill(4)))
colors, counts = np.unique(instance_mask.reshape(image_shape[0] * image_shape[1], 3),
return_counts=True, axis=0)
max_count = 0
for color, count in zip(colors, counts):
if count > max_count:
max_count = count
bg_color = color
ind = np.where(instance_mask == bg_color)
instance_mask[ind] = 0.
instance_mask = cv2.addWeighted(np.squeeze(image), 1, instance_mask, 0.3, 0)
instance_mask = cv2.resize(instance_mask, (1280, 720))
clust_time = time.time() - tic
cluster_time += clust_time
res_img = cv2.cvtColor(instance_mask, cv2.COLOR_RGB2BGR)
cv2.imshow('res', res_img)
cv2.imshow('origin', image_original)
cv2.imwrite(output_file_name, res_img)
cv2.waitKey(0)
print('Mean inference time:', inference_time / num_images, 'fps:', num_images / inference_time)
print('Mean cluster time:', cluster_time / num_images, 'fps:', num_images / cluster_time)
print('Mean total time:', cluster_time / num_images + inference_time / num_images, 'fps:',
1. / (cluster_time / num_images + inference_time / num_images))
``` |
{
"source": "jinfagang/mmdetection",
"score": 2
} |
#### File: mmdet/datasets/custom_coco.py
```python
import numpy as np
from .coco import CocoDataset
from .registry import DATASETS
@DATASETS.register_module
class CustomCocoDataset(CocoDataset):
def __init__(self, classes, min_size=None, *args, **kwargs):
self.CLASSES = classes
super().__init__(*args, **kwargs)
self.min_size = min_size
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, seg_map. "masks" are raw annotations and not
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if self.min_size is not None:
if w < self.min_size or h < self.min_size:
continue
bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann['segmentation'])
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
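# Hedged usage sketch (config keys below are illustrative, not taken from this repo):
# in an mmdetection config this dataset could be referenced roughly as
# dict(type='CustomCocoDataset', classes=('person', 'car'), ann_file='train.json',
#      img_prefix='images/', min_size=8, pipeline=train_pipeline)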
``` |
{
"source": "jinfagang/nb",
"score": 3
} |
#### File: torch/utils/common.py
```python
import torch
import math
device = torch.device(
'cuda') if torch.cuda.is_available() else torch.device('cpu')
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
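# Quick illustrative check: channel counts are rounded up to a multiple of the divisor,
# e.g. make_divisible(63, 8) -> 64 and make_divisible(64, 8) -> 64.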
``` |
{
"source": "jinfagang/pilgrim_torch2trt",
"score": 2
} |
#### File: torch2trt_dynamic/converters/expand.py
```python
import tensorrt as trt
from ..torch2trt_dynamic import *
from ..module_test import add_module_test
from .repeat import *
from .exview import convert_exview
@tensorrt_converter('torch.Tensor.expand')
def convert_expand(ctx):
old_args = ctx.method_args
input = ctx.method_args[0]
if isinstance(ctx.method_args[1], int):
sizes = ctx.method_args[1:]
else:
sizes = ctx.method_args[1]
output = ctx.method_return
repeat_shape = []
for i in range(output.dim()):
if i < output.dim()-input.dim():
repeat_shape.append(output.shape[i])
else:
repeat_shape.append(output.shape[i]//input.shape[i+input.dim()-output.dim()])
ctx.method_args = [input]+repeat_shape
ctx.method_return = output
convert_repeat(ctx)
ctx.method_args=old_args
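# Illustrative shape walk-through (values assumed): expanding a (3, 1) tensor to
# (2, 3, 4) yields repeat_shape = [2, 1, 4], i.e. newly added leading dims are taken
# from the output shape and existing dims are repeated by output_size // input_size.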
```
#### File: torch2trt_dynamic/converters/floor_divide.py
```python
from ..torch2trt_dynamic import *
from ..module_test import add_module_test
@tensorrt_converter('torch.floor_divide')
@tensorrt_converter('torch.Tensor.floor_divide')
@tensorrt_converter('torch.Tensor.floor_divide_')
@tensorrt_converter('torch.Tensor.__floordiv__')
@tensorrt_converter('torch.Tensor.__ifloordiv__')
def convert_floor_div(ctx):
input_a = ctx.method_args[0]
input_b = ctx.method_args[1]
input_a_trt, input_b_trt = trt_(ctx.network, input_a, input_b)
output = ctx.method_return
layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.FLOOR_DIV)
output._trt = layer.get_output(0)
@tensorrt_converter('torch.Tensor.__rfloordiv__')
def convert_rfloor_div(ctx):
input_a = ctx.method_args[1] # inputs switched for rdiv
input_b = ctx.method_args[0]
input_a_trt, input_b_trt = trt_(ctx.network, input_a, input_b)
output = ctx.method_return
layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.FLOOR_DIV)
output._trt = layer.get_output(0)
```
#### File: torch2trt_dynamic/converters/linear.py
```python
from ..torch2trt_dynamic import *
from ..module_test import add_module_test
import torch
from .t import convert_t
from .matmul import convert_matmul
from .sum import convert_sum
@tensorrt_converter('torch.nn.functional.linear')
def convert_linear(ctx):
old_method_args = ctx.method_args
old_method_kwargs = ctx.method_kwargs
input = ctx.method_args[0]
weight = get_arg(ctx, 'weight', pos=1, default=None)
bias = get_arg(ctx, 'bias', pos=2, default=None)
output = ctx.method_return
# transpose weight
weight_transpose = weight.t()
ctx.method_args = [weight]
ctx.method_kwargs = {}
ctx.method_return = weight_transpose
convert_t(ctx)
# matmul
matmul_output = input.matmul(weight_transpose)
ctx.method_args = [input, weight]
ctx.method_kwargs = {}
ctx.method_return = matmul_output
convert_matmul(ctx)
# add bias
if bias is not None:
add_bias_output = matmul_output + bias
ctx.method_args = [matmul_output, bias]
ctx.method_return = add_bias_output
convert_sum(ctx)
output._trt = add_bias_output._trt
else:
output._trt = matmul_output._trt
ctx.method_args = old_method_args
ctx.method_kwargs = old_method_kwargs
ctx.method_return = output
```
#### File: torch2trt_dynamic/converters/pixel_shuffle.py
```python
from torch2trt_dynamic.torch2trt_dynamic import *
from torch2trt_dynamic.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.pixel_shuffle')
def convert_pixel_shuffle(ctx):
input = ctx.method_args[0]
upscale_factor = get_arg(ctx, "upscale_factor", pos=1, default=None)
input_trt = trt_(ctx.network, input)
input_shape_trt = ctx.network.add_shape(input_trt).get_output(0)
output = ctx.method_return
batch_shape_trt = ctx.network.add_slice(
input_shape_trt, [0], [1], [1]).get_output(0)
channel_shape_trt = ctx.network.add_slice(
input_shape_trt, [1], [1], [1]).get_output(0)
height_shape_trt = ctx.network.add_slice(
input_shape_trt, [2], [1], [1]).get_output(0)
width_shape_trt = ctx.network.add_slice(
input_shape_trt, [3], [1], [1]).get_output(0)
upscale_shape_trt = trt_(ctx.network, torch.tensor(
[upscale_factor], dtype=torch.int32).to(input.device))
upscale_p2_trt = ctx.network.add_elementwise(
upscale_shape_trt, upscale_shape_trt, trt.ElementWiseOperation.PROD).get_output(0)
new_channel_shape_trt = ctx.network.add_elementwise(
channel_shape_trt, upscale_p2_trt, trt.ElementWiseOperation.FLOOR_DIV).get_output(0)
# (b, c0, s, s, h, w)
pre_shape_trt = ctx.network.add_concatenation([batch_shape_trt,
new_channel_shape_trt,
upscale_shape_trt,
upscale_shape_trt,
height_shape_trt,
width_shape_trt]).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
layer.set_input(1, pre_shape_trt)
layer.second_transpose = (0,1,4,2,5,3)
permute_trt = layer.get_output(0)
new_height_shape_trt = ctx.network.add_elementwise(
height_shape_trt, upscale_shape_trt, trt.ElementWiseOperation.PROD).get_output(0)
new_width_shape_trt = ctx.network.add_elementwise(
width_shape_trt, upscale_shape_trt, trt.ElementWiseOperation.PROD).get_output(0)
post_shape_trt = ctx.network.add_concatenation([batch_shape_trt,
new_channel_shape_trt,
new_height_shape_trt,
new_width_shape_trt]).get_output(0)
layer = ctx.network.add_shuffle(permute_trt)
layer.set_input(1, post_shape_trt)
output._trt = layer.get_output(0)
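# Shape sketch (dimensions assumed for illustration): with upscale_factor=2 an input of
# (N, 4*C, H, W) is reshaped to (N, C, 2, 2, H, W), permuted to (N, C, H, 2, W, 2) and
# finally reshaped to (N, C, 2*H, 2*W), matching torch.nn.functional.pixel_shuffle.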
```
#### File: torch2trt_dynamic/converters/squeeze.py
```python
from torch2trt_dynamic.torch2trt_dynamic import *
from torch2trt_dynamic.module_test import add_module_test
from .identity import *
@tensorrt_converter('torch.Tensor.squeeze')
@tensorrt_converter('torch.squeeze')
def convert_squeeze(ctx):
input = ctx.method_args[0]
dim = get_arg(ctx, 'dim', pos=1, default=None)
if dim is None:
dim = list(filter(lambda x:input.shape[x]==1, range(len(input.shape))))
else:
if input.shape[dim]!=1:
ctx.method_args = [input]
convert_identity(ctx)
return
if dim <0:
dim = len(input.shape)+dim
dim = [dim]
input_trt = trt_(ctx.network, input)
shape_trt = ctx.network.add_shape(input_trt).get_output(0)
output = ctx.method_return
reverse_dim = list(filter(lambda x: x not in dim, range(len(input.shape))))
reverse_dim_trt = trt_(ctx.network, torch.tensor(reverse_dim,dtype=torch.int32).to(input.device))
new_shape_trt = ctx.network.add_gather(shape_trt, reverse_dim_trt, 0).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
layer.set_input(1, new_shape_trt)
output._trt = layer.get_output(0)
```
#### File: torch2trt_dynamic/converters/std.py
```python
from torch2trt_dynamic.torch2trt_dynamic import *
from .mean import convert_mean
from .sub import convert_sub
from .mul import convert_mul
from torch2trt_dynamic.module_test import add_module_test
@tensorrt_converter('torch.std')
@tensorrt_converter('torch.Tensor.std')
def convert_std(ctx):
old_method_args = ctx.method_args
old_method_kwargs = ctx.method_kwargs
input = ctx.method_args[0]
input_trt = trt_(ctx.network, input)
output = ctx.method_return
dim = get_arg(ctx, 'dim', pos=1, default=None)
unbiased = get_arg(ctx, 'unbiased', pos=2, default=True)
keepdim = get_arg(ctx, 'keepdim', pos=3, default=False)
# compute mean
if dim is not None:
mean_val = input.mean(dim, True)
ctx.method_args = [input, dim, True]
ctx.method_kwargs = []
ctx.method_return = mean_val
convert_mean(ctx)
else:
mean_val = input.mean()
ctx.method_args = [input, None, False]
ctx.method_kwargs = []
ctx.method_return = mean_val
convert_mean(ctx)
# compute x-mean
x_minus_mean = input-mean_val
ctx.method_args = [input, mean_val]
ctx.method_return = x_minus_mean
convert_sub(ctx)
# compute (x-mean)*(x-mean)
x_pow = x_minus_mean*x_minus_mean
ctx.method_args = [x_minus_mean, x_minus_mean]
ctx.method_return = x_pow
convert_mul(ctx)
# compute average
x_pow_trt = trt_(ctx.network, x_pow)
# get dims from args or kwargs
if dim is None:
dim = tuple(range(len(input.shape)))
# convert list to tuple
if isinstance(dim, list):
dim = tuple(dim)
if not isinstance(dim, tuple):
dim = (dim, )
dim = tuple([d if d>=0 else len(input.shape)+d for d in dim])
# create axes bitmask for reduce layer
axes = 0
for d in dim:
axes |= 1<<d
if unbiased:
layer = ctx.network.add_reduce(x_pow_trt, trt.ReduceOperation.SUM, axes, keepdim)
sum_trt = layer.get_output(0)
# compute reduce size
shape_trt = tensor_trt_get_shape_trt(ctx.network, input_trt, dim[0], 1)
layer = ctx.network.add_identity(shape_trt)
layer.set_output_type(0, trt.float32)
shape_trt = layer.get_output(0)
for d in dim[1:]:
other_shape_trt = tensor_trt_get_shape_trt(ctx.network, input_trt, d, 1)
layer = ctx.network.add_identity(other_shape_trt)
layer.set_output_type(0, trt.float32)
other_shape_trt = layer.get_output(0)
layer = ctx.network.add_elementwise(shape_trt, other_shape_trt, trt.ElementWiseOperation.PROD)
layer.set_output_type(0, trt.float32)
shape_trt = layer.get_output(0)
# reduce size minus one
one_trt = trt_(ctx.network, input.new_ones((1,)).float())
layer = ctx.network.add_elementwise(shape_trt, one_trt, trt.ElementWiseOperation.SUB)
layer.set_output_type(0, sum_trt.dtype)
shape_minus_one_trt = layer.get_output(0)
layer = ctx.network.add_shuffle(shape_minus_one_trt)
layer.reshape_dims = (1,) * len(sum_trt.shape)
shape_minus_one_trt = layer.get_output(0)
# multi scale
layer = ctx.network.add_elementwise(sum_trt, shape_minus_one_trt, trt.ElementWiseOperation.DIV)
avg_trt = layer.get_output(0)
else:
layer = ctx.network.add_reduce(x_pow_trt, trt.ReduceOperation.AVG, axes, keepdim)
avg_trt = layer.get_output(0)
# reduce shape might be zero
need_reshape = False
if len(avg_trt.shape)==0:
need_reshape = True
layer = ctx.network.add_shuffle(avg_trt)
layer.reshape_dims = (1,)
avg_trt = layer.get_output(0)
layer = ctx.network.add_unary(avg_trt, trt.UnaryOperation.SQRT)
output._trt = layer.get_output(0)
if need_reshape:
layer = ctx.network.add_shuffle(output._trt)
layer.reshape_dims = tuple()
output._trt = layer.get_output(0)
ctx.method_args = old_method_args
ctx.method_kwargs = old_method_kwargs
ctx.method_return = output
```
#### File: torch2trt_dynamic/converters/t.py
```python
from torch2trt_dynamic.torch2trt_dynamic import *
from torch2trt_dynamic.module_test import add_module_test
from .transpose import convert_transpose
@tensorrt_converter('torch.Tensor.t')
def convert_t(ctx):
input = ctx.method_args[0]
input_trt = trt_(ctx.network, input)
output = ctx.method_return
# permutation -1 because TRT does not include batch dim
if len(input.shape)==1:
layer = ctx.network.add_identity(input_trt)
output._trt = layer.get_output(0)
else:
ctx.method_args = [input, 1, 0]
ctx.method_kwargs = {}
convert_transpose(ctx)
``` |
{
"source": "jinfagang/pytorch_name_net",
"score": 2
} |
#### File: pytorch_name_net/utils/model_utils.py
```python
import torch
import os
import glob
import numpy as np
import time
import math
def load_previous_model(model, checkpoints_dir, model_prefix):
f_list = glob.glob(os.path.join(checkpoints_dir, model_prefix) + '-*.pth')
start_epoch = 1
if len(f_list) >= 1:
epoch_list = [int(i.split('-')[-1].split('.')[0]) for i in f_list]
last_checkpoint = f_list[np.argmax(epoch_list)]
start_epoch = np.max(epoch_list)
if os.path.exists(last_checkpoint):
print('load from {}'.format(last_checkpoint))
model.load_state_dict(torch.load(last_checkpoint, map_location=lambda storage, loc: storage))
return model, start_epoch
def save_model(model, checkpoints_dir, model_prefix, epoch, max_keep=5):
if not os.path.exists(checkpoints_dir):
os.makedirs(checkpoints_dir)
f_list = glob.glob(os.path.join(checkpoints_dir, model_prefix) + '-*.pth')
if len(f_list) >= max_keep + 2:
# keep only the newest `max_keep` checkpoints and delete the older ones
epoch_list = [int(i.split('-')[-1].split('.')[0]) for i in f_list]
to_delete = [f_list[i] for i in np.argsort(epoch_list)[:-max_keep]]
for f in to_delete:
os.remove(f)
name = model_prefix + '-{}.pth'.format(epoch)
file_path = os.path.join(checkpoints_dir, name)
torch.save(model.state_dict(), file_path)
def as_minutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
now = time.time()
s = now - since
es = s / percent
rs = es - s
return 'cost: %s, estimate: %s %s ' % (as_minutes(s), as_minutes(rs), str(round(percent*100, 2)) + '%')
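# Illustrative call (numbers assumed): 30 seconds in at 25% progress,
# time_since(start, 0.25) reports roughly "cost: 0m 30s, estimate: 1m 30s 25.0%".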
``` |
{
"source": "jinfagang/tensorflow_yolov3",
"score": 3
} |
#### File: tensorflow_yolov3/core/darknet53.py
```python
import tensorflow as tf
import cv2
class DarkNet53(object):
def __init__(self):
pass
@staticmethod
def fix_padding(inputs, kernel_size, mode='CONSTANT'):
"""
fixed padding whatever inputs is
this code is get from ResNet from models repo
:param inputs:
:param kernel_size:
:param mode:
:return:
"""
pad_total = kernel_size - 1
pad_start = pad_total // 2
pad_end = pad_total - pad_start
padded_inputs = tf.pad(inputs, [[0, 0], [pad_start, pad_end], [pad_start, pad_end], [0, 0]], mode=mode)
return padded_inputs
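# Worked padding example (kernel sizes assumed): kernel_size=3 gives pad_total=2, split
# as pad_start=1 and pad_end=1; kernel_size=5 gives pad_total=4, split as 2 and 2.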
def conv2d_fixed_padding(self, inputs, filters, kernel_size, strides=1):
if strides > 1:
inputs = self.fix_padding(inputs, kernel_size)
inputs = tf.layers.conv2d(inputs, filters=filters, kernel_size=kernel_size,
strides=strides, padding=('SAME' if strides == 1 else 'VALID'))
return inputs
def _darknet53_block(self, inputs, filters):
shortcut = inputs
# 2 layer convolution in DarkNet53, first is 1x1, next is 3x3 with fixed padding
# only filters are various
inputs = self.conv2d_fixed_padding(inputs, filters, 1)
inputs = self.conv2d_fixed_padding(inputs, filters * 2, 3)
# residual connection: element-wise addition of the shortcut (not a concatenation)
inputs += shortcut
return inputs
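# Channel walk-through (sizes assumed): with filters=32 the block squeezes the input to
# 32 channels with a 1x1 conv, restores 64 channels with a 3x3 conv, then adds the
# shortcut, so the incoming tensor must already have filters * 2 channels.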
def build_model(self, inputs):
"""
main process of building DarNet53
:return:
"""
# 1th part
inputs = self.conv2d_fixed_padding(inputs, 32, 3)
inputs = self.conv2d_fixed_padding(inputs, 64, 3, strides=2)
inputs = self._darknet53_block(inputs, 32)
# connector 1 - 128
inputs = self.conv2d_fixed_padding(inputs, 128, 3, strides=2)
# 2nd part
for i in range(2):
inputs = self._darknet53_block(inputs, 64)
# connector 2 - 256
inputs = self.conv2d_fixed_padding(inputs, 256, 3, strides=2)
# 3rd part
for i in range(8):
inputs = self._darknet53_block(inputs, 128)
# connector 3 - 512
route_1 = inputs
inputs = self.conv2d_fixed_padding(inputs, 512, 3, strides=2)
# 4th
for i in range(8):
inputs = self._darknet53_block(inputs, 256)
# connector 4 - 1024
route_2 = inputs
inputs = self.conv2d_fixed_padding(inputs, 1024, 3, strides=2)
# 5th
for i in range(4):
inputs = self._darknet53_block(inputs, 512)
# the original DarkNet53 has an additional average-pooling layer and a softmax head,
# but we are not using it for classification, so just drop them
return route_1, route_2, inputs
darknet53 = DarkNet53()
```
#### File: tensorflow_yolov3/core/nms.py
```python
import numpy as np
from .config import global_config
class NMS(object):
def __init__(self):
pass
@staticmethod
def _iou(box1, box2):
b1_x0, b1_y0, b1_x1, b1_y1 = box1
b2_x0, b2_y0, b2_x1, b2_y1 = box2
int_x0 = max(b1_x0, b2_x0)
int_y0 = max(b1_y0, b2_y0)
int_x1 = min(b1_x1, b2_x1)
int_y1 = min(b1_y1, b2_y1)
int_area = (int_x1 - int_x0) * (int_y1 - int_y0)
b1_area = (b1_x1 - b1_x0) * (b1_y1 - b1_y0)
b2_area = (b2_x1 - b2_x0) * (b2_y1 - b2_y0)
iou = int_area / (b1_area + b2_area - int_area + 1e-05)
return iou
def nms(self, predictions_with_boxes):
"""
do nms
:param predictions_with_boxes:
:param confidence_threshold:
:param iou_threshold:
:return:
"""
conf_mask = np.expand_dims((predictions_with_boxes[:, :, 4] > global_config.nms_cf_threshold), -1)
predictions = predictions_with_boxes * conf_mask
result = {}
for i, image_pred in enumerate(predictions):
shape = image_pred.shape
non_zero_idxs = np.nonzero(image_pred)
image_pred = image_pred[non_zero_idxs]
image_pred = image_pred.reshape(-1, shape[-1])
bbox_attrs = image_pred[:, :5]
classes = image_pred[:, 5:]
classes = np.argmax(classes, axis=-1)
unique_classes = list(set(classes.reshape(-1)))
for cls in unique_classes:
cls_mask = classes == cls
cls_boxes = bbox_attrs[np.nonzero(cls_mask)]
cls_boxes = cls_boxes[cls_boxes[:, -1].argsort()[::-1]]
cls_scores = cls_boxes[:, -1]
cls_boxes = cls_boxes[:, :-1]
while len(cls_boxes) > 0:
box = cls_boxes[0]
score = cls_scores[0]
if not cls in result:
result[cls] = []
result[cls].append((box, score))
cls_boxes = cls_boxes[1:]
ious = np.array([self._iou(box, x) for x in cls_boxes])
iou_mask = ious < global_config.nms_iou_threshold
cls_boxes = cls_boxes[np.nonzero(iou_mask)]
cls_scores = cls_scores[np.nonzero(iou_mask)]
return result
nms = NMS()
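# Tiny worked IoU example (boxes assumed): two 2x2 boxes offset by half overlap in an
# area of 2 with a union of 6, so NMS._iou((0, 0, 2, 2), (1, 0, 3, 2)) is roughly 0.333.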
``` |
{
"source": "jinfagang/torch2trt_dynamic",
"score": 2
} |
#### File: torch2trt_dynamic/converters/adaptive_max_pool1d.py
```python
import tensorrt as trt
from torch2trt_dynamic.plugins import create_adaptivepool_plugin
from torch2trt_dynamic.torch2trt_dynamic import (get_arg, tensorrt_converter,
trt_)
@tensorrt_converter('torch.nn.functional.adaptive_max_pool1d')
def convert_adaptive_max_pool1d(ctx):
input = ctx.method_args[0]
output_size = get_arg(ctx, 'output_size', pos=1, default=0)
output = ctx.method_return
input_trt = trt_(ctx.network, input)
if output_size == 1:
# an output size of 1 is just a global max over the last dimension, so use a reduce layer
shape_length = len(input.shape)
axes = (1 << (shape_length - 1))
keepdim = True
layer = ctx.network.add_reduce(input_trt, trt.ReduceOperation.MAX,
axes, keepdim)
output._trt = layer.get_output(0)
else:
output_size = (output_size, 1)
# input.unsqueeze(-1)
layer = ctx.network.add_shuffle(input_trt)
layer.reshape_dims = (0, 0, 0, 1)
input_trt = layer.get_output(0)
# adaptive pool 2d
plugin = create_adaptivepool_plugin(
'adaptive_avg_pool2d_' + str(id(input)),
output_size=output_size,
pooling_type=trt.PoolingType.MAX)
layer = ctx.network.add_plugin_v2(inputs=[input_trt], plugin=plugin)
output_trt = layer.get_output(0)
layer = ctx.network.add_shuffle(output_trt)
layer.reshape_dims = (0, 0, 0)
output_trt = layer.get_output(0)
output._trt = output_trt
```
#### File: torch2trt_dynamic/converters/addcmul.py
```python
import numpy as np
import tensorrt as trt
import torch
from torch2trt_dynamic.module_test import add_module_test
from torch2trt_dynamic.torch2trt_dynamic import (tensor_trt_get_shape_trt,
tensorrt_converter, trt_)
@tensorrt_converter('torch.addcmul')
@tensorrt_converter('torch.Tensor.addcmul')
def convert_addcmul(ctx):
tensor0 = ctx.method_args[0]
value = 1
next_tensor_offset = 0
if len(ctx.method_args) == 4:
value = ctx.method_args[1]
next_tensor_offset = 1
if 'value' in ctx.method_kwargs:
value = ctx.method_kwargs['value']
tensor1 = ctx.method_args[1 + next_tensor_offset]
tensor2 = ctx.method_args[2 + next_tensor_offset]
input0_trt, input1_trt, input2_trt = trt_(ctx.network, tensor0, tensor1,
tensor2)
output = ctx.method_return
output_mul_trt = ctx.network.add_elementwise(
input1_trt, input2_trt, trt.ElementWiseOperation.PROD).get_output(0)
if value != 1 or value != 1.:
shift = np.zeros([1], np.float32)
scale = np.array([value], np.float32)
if len(tensor0.shape) < 4:
input_shape_trt = tensor_trt_get_shape_trt(ctx.network, input0_trt)
add_dim = 4 - len(tensor0.shape)
add_trt = trt_(ctx.network, torch.ones([add_dim],
dtype=torch.int32))
new_input_shape_trt = ctx.network.add_concatenation(
[add_trt, input_shape_trt]).get_output(0)
layer = ctx.network.add_shuffle(output_mul_trt)
layer.set_input(1, new_input_shape_trt)
output_mul_trt = layer.get_output(0)
output_mul_trt = ctx.network.add_scale(output_mul_trt,
trt.ScaleMode.UNIFORM, shift,
scale).get_output(0)
if len(tensor0.shape) < 4:
layer = ctx.network.add_shuffle(output_mul_trt)
layer.set_input(1, input_shape_trt)
output_mul_trt = layer.get_output(0)
output_trt = ctx.network.add_elementwise(
input0_trt, output_mul_trt, trt.ElementWiseOperation.SUM).get_output(0)
output._trt = output_trt
class AddcmulTestModule(torch.nn.Module):
def __init__(self, value):
super(AddcmulTestModule, self).__init__()
self.value = value
def forward(self, x, y, z):
return torch.addcmul(x, self.value, y, z)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 5), (1, 4, 5),
(1, 4, 5)])
def test_addcmul():
return AddcmulTestModule(2)
```
#### File: torch2trt_dynamic/converters/chunk.py
```python
import tensorrt as trt
import torch
from ..module_test import add_module_test
from ..torch2trt_dynamic import (get_arg, slice_shape_trt,
tensor_trt_get_shape_trt, tensorrt_converter,
trt_)
from .split import convert_split
@tensorrt_converter('torch.chunk')
@tensorrt_converter('torch.Tensor.chunk')
def convert_chunk(ctx):
# https://github.com/pytorch/pytorch/blob/b90fc52c687a6851047f18ec9d06fb998efe99dd/aten/src/ATen/native/TensorShape.cpp
input = get_arg(ctx, 'input', 0, None)
input_trt = trt_(ctx.network, input)
chunks = get_arg(ctx, 'chunks', 1, 0)
dim = get_arg(ctx, 'dim', 2, 0)
if dim < 0:
dim = input.dim() + dim
outputs = ctx.method_return
if len(outputs) != chunks:
convert_split(ctx)
return
input_shape_trt = tensor_trt_get_shape_trt(ctx.network, input_trt)
head_shape_trt = slice_shape_trt(ctx.network, input_shape_trt, 0, dim)
chunk_shape_trt = slice_shape_trt(ctx.network, input_shape_trt, dim, 1, 1)
tail_shape_trt = slice_shape_trt(ctx.network, input_shape_trt, dim + 1)
chunk_trt = trt_(ctx.network, int(chunks))
one_trt = trt_(ctx.network, 1)
zero_trt = trt_(ctx.network, 0)
# chunk 0~n-2
chunk_size_trt = ctx.network.add_elementwise(
chunk_shape_trt, chunk_trt, trt.ElementWiseOperation.SUM).get_output(0)
chunk_size_trt = ctx.network.add_elementwise(
chunk_size_trt, one_trt, trt.ElementWiseOperation.SUB).get_output(0)
chunk_size_trt = ctx.network.add_elementwise(
chunk_size_trt, chunk_trt,
trt.ElementWiseOperation.FLOOR_DIV).get_output(0)
# chunk n-1
chunk_last_trt = ctx.network.add_elementwise(
chunk_trt, one_trt, trt.ElementWiseOperation.SUB).get_output(0)
chunk_last_trt = ctx.network.add_elementwise(
chunk_size_trt, chunk_last_trt,
trt.ElementWiseOperation.PROD).get_output(0)
chunk_last_trt = ctx.network.add_elementwise(
chunk_shape_trt, chunk_last_trt,
trt.ElementWiseOperation.SUB).get_output(0)
stride_trt = ctx.network.add_concatenation([one_trt] *
len(input.shape)).get_output(0)
if head_shape_trt is not None:
head_start_trt = ctx.network.add_concatenation([zero_trt] *
dim).get_output(0)
if tail_shape_trt is not None:
tail_start_trt = ctx.network.add_concatenation(
[zero_trt] * (len(input.shape) - 1 - dim)).get_output(0)
start_trt = []
size_trt = []
chunk_start_trt = zero_trt
if head_shape_trt is not None:
start_trt.append(head_start_trt)
size_trt.append(head_shape_trt)
start_trt.append(chunk_start_trt)
size_trt.append(chunk_size_trt)
if tail_shape_trt is not None:
start_trt.append(tail_start_trt)
size_trt.append(tail_shape_trt)
start_trt = ctx.network.add_concatenation(start_trt).get_output(0)
size_trt = ctx.network.add_concatenation(size_trt).get_output(0)
input_dim = len(input.shape)
for i in range(chunks - 1):
layer = ctx.network.add_slice(input_trt, [0] * input_dim,
[1] * input_dim, [1] * input_dim)
layer.set_input(1, start_trt)
layer.set_input(2, size_trt)
layer.set_input(3, stride_trt)
outputs[i]._trt = layer.get_output(0)
start_trt = []
chunk_start_trt = ctx.network.add_elementwise(
chunk_start_trt, chunk_size_trt,
trt.ElementWiseOperation.SUM).get_output(0)
if head_shape_trt is not None:
start_trt.append(head_start_trt)
start_trt.append(chunk_start_trt)
if tail_shape_trt is not None:
start_trt.append(tail_start_trt)
start_trt = ctx.network.add_concatenation(start_trt).get_output(0)
size_trt = []
if head_shape_trt is not None:
size_trt.append(head_shape_trt)
size_trt.append(chunk_last_trt)
if tail_shape_trt is not None:
size_trt.append(tail_shape_trt)
size_trt = ctx.network.add_concatenation(size_trt).get_output(0)
layer = ctx.network.add_slice(input_trt, [0] * input_dim, [1] * input_dim,
[1] * input_dim)
layer.set_input(1, start_trt)
layer.set_input(2, size_trt)
layer.set_input(3, stride_trt)
outputs[chunks - 1]._trt = layer.get_output(0)
class TorchChunk(torch.nn.Module):
def __init__(self, *args, **kwargs):
super(TorchChunk, self).__init__()
self.args = args
self.kwargs = kwargs
def forward(self, x):
return torch.chunk(x, *self.args, **self.kwargs)
class TensorChunk(torch.nn.Module):
def __init__(self, *args, **kwargs):
super(TensorChunk, self).__init__()
self.args = args
self.kwargs = kwargs
def forward(self, x):
return x.chunk(*self.args, **self.kwargs)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_torch_chunk_1_1():
return TorchChunk(1, 1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_torch_chunk_2_1():
return TorchChunk(2, 1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_torch_chunk_3_1():
return TorchChunk(3, 1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_torch_chunk_3_2():
return TorchChunk(3, 2)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_tensor_chunk_3_2():
return TensorChunk(3, 2)
```
#### File: torch2trt_dynamic/converters/flatten.py
```python
import tensorrt as trt
import torch
from ..torch2trt_dynamic import get_arg, tensorrt_converter, trt_
from .identity import convert_identity
@tensorrt_converter('torch.Tensor.flatten')
@tensorrt_converter('torch.flatten')
def convert_flatten(ctx):
input = ctx.method_args[0]
start_dim = get_arg(ctx, 'start_dim', pos=1, default=0)
end_dim = get_arg(ctx, 'end_dim', pos=2, default=-1)
if start_dim == -1:
start_dim = len(input.shape) - 1
if end_dim == -1:
end_dim = len(input.shape) - 1
if start_dim == end_dim:
ctx.method_args = [input]
convert_identity(ctx)
return
input_trt = trt_(ctx.network, input)
# shuffle of bool is not allowed in cudnn
if input.dtype == torch.bool:
layer = ctx.network.add_identity(input_trt)
layer.set_output_type(0, trt.DataType.INT32)
input_trt = layer.get_output(0)
shape_trt = ctx.network.add_shape(input_trt).get_output(0)
output = ctx.method_return
shape1_trt = None
shape2_trt = None
if start_dim != 0:
slice1_start = [0]
slice1_size = [start_dim]
slice1_stride = [1]
shape1_trt = ctx.network.add_slice(shape_trt, slice1_start,
slice1_size,
slice1_stride).get_output(0)
if end_dim != len(input.shape) - 1:
slice2_start = [end_dim + 1]
slice2_size = [len(input.shape) - end_dim - 1]
slice2_stride = [1]
shape2_trt = ctx.network.add_slice(shape_trt, slice2_start,
slice2_size,
slice2_stride).get_output(0)
slice_mid_start = [start_dim]
slice_mid_size = [end_dim - start_dim + 1]
slice_mid_stride = [1]
shape_mid_trt = ctx.network.add_slice(shape_trt, slice_mid_start,
slice_mid_size,
slice_mid_stride).get_output(0)
# reduce mid
mid_trt = ctx.network.add_slice(shape_mid_trt, [0], [1], [1]).get_output(0)
for i in range(end_dim - start_dim):
other_trt = ctx.network.add_slice(shape_mid_trt, [i + 1], [1],
[1]).get_output(0)
mid_trt = ctx.network.add_elementwise(
mid_trt, other_trt, trt.ElementWiseOperation.PROD).get_output(0)
shape_mid_trt = mid_trt
if shape1_trt is None and shape2_trt is None:
new_shape_trt = shape_mid_trt
elif shape1_trt is None:
new_shape_trt = ctx.network.add_concatenation(
[shape_mid_trt, shape2_trt]).get_output(0)
elif shape2_trt is None:
new_shape_trt = ctx.network.add_concatenation(
[shape1_trt, shape_mid_trt]).get_output(0)
else:
new_shape_trt = ctx.network.add_concatenation(
[shape1_trt, shape_mid_trt, shape2_trt]).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
layer.set_input(1, new_shape_trt)
output_trt = layer.get_output(0)
if input.dtype == torch.bool:
layer = ctx.network.add_identity(output_trt)
layer.set_output_type(0, trt.DataType.BOOL)
output_trt = layer.get_output(0)
output._trt = output_trt
```
#### File: torch2trt_dynamic/converters/LayerNorm.py
```python
import numpy as np
import tensorrt as trt
from ..torch2trt_dynamic import (tensor_trt_get_shape_trt, tensorrt_converter,
torch_dim_to_trt_axes, trt_)
@tensorrt_converter('torch.nn.LayerNorm.forward')
def convert_LayerNorm(ctx):
module = ctx.method_args[0]
input = ctx.method_args[1]
normalized_shape = module.normalized_shape
weight = module.weight
bias = module.bias
eps = module.eps
output = ctx.method_return
eps_np = np.array([eps], dtype=np.float32)
keep_dims = True
input_trt = trt_(ctx.network, input)
if len(input.shape) == 3:
input_shape_trt = tensor_trt_get_shape_trt(ctx.network, input_trt)
new_input_shape_trt = ctx.network.add_concatenation(
[trt_(ctx.network, 1), input_shape_trt]).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
layer.set_input(1, new_input_shape_trt)
input_trt = layer.get_output(0)
reduce_axes = torch_dim_to_trt_axes(
tuple(
range(
len(input_trt.shape) - len(normalized_shape),
len(input_trt.shape))))
mean_trt = ctx.network.add_reduce(input_trt, trt.ReduceOperation.AVG,
reduce_axes, keep_dims).get_output(0)
# compute variance over spatial (include eps, to reduce layer count)
delta_trt = ctx.network.add_elementwise(
input_trt, mean_trt, trt.ElementWiseOperation.SUB).get_output(0)
var_trt = ctx.network.add_scale(delta_trt, trt.ScaleMode.UNIFORM,
np.zeros_like(eps_np),
np.ones_like(eps_np),
2 * np.ones_like(eps_np)).get_output(0)
var_trt = ctx.network.add_reduce(var_trt, trt.ReduceOperation.AVG,
reduce_axes, keep_dims).get_output(0)
# compute sqrt(var + eps)
var_trt = ctx.network.add_scale(var_trt, trt.ScaleMode.UNIFORM, eps_np,
np.ones_like(eps_np),
0.5 * np.ones_like(eps_np)).get_output(0)
# compute final result
result_trt = ctx.network.add_elementwise(
delta_trt, var_trt, trt.ElementWiseOperation.DIV).get_output(0)
if len(input.shape) == 3:
layer = ctx.network.add_shuffle(result_trt)
layer.set_input(1, input_shape_trt)
result_trt = layer.get_output(0)
if weight is not None:
assert weight.ndim <= input.ndim
while weight.ndim < input.ndim:
weight = weight.unsqueeze(0)
weight_trt = trt_(ctx.network, weight)
layer = ctx.network.add_elementwise(result_trt, weight_trt,
trt.ElementWiseOperation.PROD)
result_trt = layer.get_output(0)
if bias is not None:
assert bias.ndim <= input.ndim
while bias.ndim < input.ndim:
bias = bias.unsqueeze(0)
bias_trt = trt_(ctx.network, bias)
layer = ctx.network.add_elementwise(result_trt, bias_trt,
trt.ElementWiseOperation.SUM)
result_trt = layer.get_output(0)
output._trt = result_trt
```
#### File: torch2trt_dynamic/converters/logical.py
```python
import tensorrt as trt
from ..torch2trt_dynamic import tensorrt_converter, trt_
from .unary import __convert_unary
def convert_compare(ctx, compare_op):
input_a = ctx.method_args[0]
input_b = ctx.method_args[1]
input_a_trt, input_b_trt = trt_(ctx.network, input_a, input_b)
output = ctx.method_return
layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, compare_op)
layer.set_output_type(0, trt.bool)
output._trt = layer.get_output(0)
@tensorrt_converter('torch.gt')
@tensorrt_converter('torch.Tensor.gt')
@tensorrt_converter('torch.Tensor.__gt__')
def convert_greater(ctx):
convert_compare(ctx, trt.ElementWiseOperation.GREATER)
@tensorrt_converter('torch.lt')
@tensorrt_converter('torch.Tensor.lt')
@tensorrt_converter('torch.Tensor.__lt__')
def convert_less(ctx):
convert_compare(ctx, trt.ElementWiseOperation.LESS)
@tensorrt_converter('torch.Tensor.__and__')
def convert_and(ctx):
convert_compare(ctx, trt.ElementWiseOperation.AND)
@tensorrt_converter('torch.Tensor.__or__')
def convert_or(ctx):
convert_compare(ctx, trt.ElementWiseOperation.OR)
@tensorrt_converter('torch.eq')
@tensorrt_converter('torch.Tensor.eq')
@tensorrt_converter('torch.Tensor.__eq__')
def convert_equal(ctx):
convert_compare(ctx, trt.ElementWiseOperation.EQUAL)
@tensorrt_converter('torch.ge')
@tensorrt_converter('torch.Tensor.ge')
@tensorrt_converter('torch.Tensor.__ge__')
def convert_greaterequal(ctx):
input_a = ctx.method_args[0]
input_b = ctx.method_args[1]
output = ctx.method_return
greater = input_a > input_b
equal = input_a == input_b
ctx.method_return = greater
convert_greater(ctx)
ctx.method_return = equal
convert_equal(ctx)
ctx.method_args = [greater, equal]
ctx.method_return = output
convert_or(ctx)
@tensorrt_converter('torch.le')
@tensorrt_converter('torch.Tensor.le')
@tensorrt_converter('torch.Tensor.__le__')
def convert_lessequal(ctx):
input_a = ctx.method_args[0]
input_b = ctx.method_args[1]
output = ctx.method_return
less = input_a < input_b
equal = input_a == input_b
ctx.method_return = less
convert_less(ctx)
ctx.method_return = equal
convert_equal(ctx)
ctx.method_args = [less, equal]
ctx.method_return = output
convert_or(ctx)
@tensorrt_converter('torch.ne')
@tensorrt_converter('torch.Tensor.ne')
@tensorrt_converter('torch.Tensor.__ne__')
def convert_ne(ctx):
input_a = ctx.method_args[0]
input_b = ctx.method_args[1]
output = ctx.method_return
equal = input_a == input_b
ctx.method_return = equal
convert_equal(ctx)
ctx.method_args = [equal]
ctx.method_return = output
__convert_unary(ctx, trt.UnaryOperation.NOT)
@tensorrt_converter('torch.logical_xor')
@tensorrt_converter('torch.Tensor.logical_xor')
@tensorrt_converter('torch.Tensor.__xor__')
def convert_xor(ctx):
convert_compare(ctx, trt.ElementWiseOperation.XOR)
```
#### File: torch2trt_dynamic/converters/mean.py
```python
import tensorrt as trt
import torch
from torch2trt_dynamic.module_test import add_module_test
from torch2trt_dynamic.torch2trt_dynamic import (get_arg, tensorrt_converter,
trt_)
@tensorrt_converter('torch.mean')
@tensorrt_converter('torch.Tensor.mean')
def convert_mean(ctx):
input = ctx.method_args[0]
input_trt = trt_(ctx.network, input)
output = ctx.method_return
dim = get_arg(ctx, 'dim', pos=1, default=None)
keep_dims = get_arg(ctx, 'keepdim', pos=2, default=False)
# get dims from args or kwargs
if dim is None:
dim = tuple(range(len(input.shape)))
# convert list to tuple
if isinstance(dim, list):
dim = tuple(dim)
if not isinstance(dim, tuple):
dim = (dim, )
dim = tuple([d if d >= 0 else len(input.shape) + d for d in dim])
# create axes bitmask for reduce layer
axes = 0
for d in dim:
axes |= 1 << d
layer = ctx.network.add_reduce(input_trt, trt.ReduceOperation.AVG, axes,
keep_dims)
output._trt = layer.get_output(0)
class Mean(torch.nn.Module):
def __init__(self, dim, keepdim):
super(Mean, self).__init__()
self.dim = dim
self.keepdim = keepdim
def forward(self, x):
return x.mean(self.dim, self.keepdim)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_mean_channel():
return Mean(1, False)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_mean_tuple():
return Mean((1, 2), False)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_mean_keepdim():
return Mean(1, True)
```
#### File: torch2trt_dynamic/converters/squeeze.py
```python
import torch
from torch2trt_dynamic.torch2trt_dynamic import (get_arg, tensorrt_converter,
trt_)
from .identity import convert_identity
@tensorrt_converter('torch.Tensor.squeeze')
@tensorrt_converter('torch.squeeze')
def convert_squeeze(ctx):
input = ctx.method_args[0]
dim = get_arg(ctx, 'dim', pos=1, default=None)
if dim is None:
dim = list(
filter(lambda x: input.shape[x] == 1, range(len(input.shape))))
else:
if input.shape[dim] != 1:
ctx.method_args = [input]
convert_identity(ctx)
return
if dim < 0:
dim = len(input.shape) + dim
dim = [dim]
input_trt = trt_(ctx.network, input)
shape_trt = ctx.network.add_shape(input_trt).get_output(0)
output = ctx.method_return
reverse_dim = list(filter(lambda x: x not in dim, range(len(input.shape))))
reverse_dim_trt = trt_(
ctx.network,
torch.tensor(reverse_dim, dtype=torch.int32).to(input.device))
new_shape_trt = ctx.network.add_gather(shape_trt, reverse_dim_trt,
0).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
layer.set_input(1, new_shape_trt)
output._trt = layer.get_output(0)
```
#### File: torch2trt_dynamic/converters/unfold.py
```python
from torch2trt_dynamic.plugins import create_torchunfold_plugin
from torch2trt_dynamic.torch2trt_dynamic import (get_arg, tensorrt_converter,
trt_)
@tensorrt_converter('torch.nn.functional.unfold')
def convert_unfold(ctx):
input = ctx.method_args[0]
kernel_size = get_arg(ctx, 'kernel_size', pos=1, default=0)
dilation = get_arg(ctx, 'dilation', pos=2, default=1)
padding = get_arg(ctx, 'padding', pos=3, default=0)
stride = get_arg(ctx, 'stride', pos=4, default=1)
output = ctx.method_return
input_trt = trt_(ctx.network, input)
plugin = create_torchunfold_plugin(
'unfold_' + str(id(input)),
kernel_size=kernel_size,
dilation=dilation,
padding=padding,
stride=stride)
layer = ctx.network.add_plugin_v2(inputs=[input_trt], plugin=plugin)
output._trt = layer.get_output(0)
```
#### File: torch2trt_dynamic/converters/view.py
```python
import torch
from torch2trt_dynamic.module_test import add_module_test
from torch2trt_dynamic.torch2trt_dynamic import (get_arg, tensorrt_converter,
trt_, trt_cast)
from .size import IntWarper
@tensorrt_converter('torch.Tensor.reshape')
@tensorrt_converter('torch.Tensor.view')
def convert_view(ctx):
input = ctx.method_args[0]
size = get_arg(ctx, 'shape', pos=1, default=[])
if isinstance(size, int):
size = tuple(ctx.method_args[1:])
input_trt = trt_(ctx.network, input)
output = ctx.method_return
if input.dtype == torch.bool:
input_trt = trt_cast(ctx.network, input_trt, torch.int32)
# check if there are shape tensor
is_shape_tensor = False
for s in size:
if isinstance(s, IntWarper):
is_shape_tensor = True
break
# negative shape might cause overflow, forbid for now
for s in size:
if s < 0:
is_shape_tensor = True
break
# compute shape tensor
if is_shape_tensor:
shape_trt = []
for idx, s in enumerate(size):
if isinstance(s, IntWarper):
shape_trt.append(s._trt)
else:
const_shape_trt = trt_(
ctx.network, input.new_tensor([s], dtype=torch.int32))
shape_trt.append(const_shape_trt)
shape_trt = ctx.network.add_concatenation(shape_trt).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
if is_shape_tensor:
layer.set_input(1, shape_trt)
else:
layer.reshape_dims = output.shape
output_trt = layer.get_output(0)
if input.dtype == torch.bool:
output_trt = trt_cast(ctx.network, output_trt, torch.bool)
output._trt = output_trt
class View(torch.nn.Module):
def __init__(self, *dims):
super(View, self).__init__()
self.dims = dims
def forward(self, x):
return x.view(*self.dims)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_view_1d():
return View(1, -1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_view_2d():
return View(1, 1, -1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_view_3d():
return View(1, 1, 1, -1)
```
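A quick sanity check of the `View` test module above in plain PyTorch (no TensorRT conversion involved); the input shape is chosen only for illustration:
```python
import torch
# Flatten everything after a leading batch dimension of 1, as test_view_1d() exercises.
m = View(1, -1)
x = torch.randn(1, 3, 3)
print(m(x).shape)  # torch.Size([1, 9])
```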
#### File: torch2trt_dynamic/torch2trt_dynamic/__init__.py
```python
import tensorrt as trt
from .converters import * # noqa: F401,F403
from .torch2trt_dynamic import * # noqa: F401,F403
def load_plugins():
import ctypes
import os
ctypes.CDLL(
os.path.join(os.path.dirname(__file__), 'libtorch2trt_dynamic.so'))
registry = trt.get_plugin_registry()
torch2trt_creators = [
c for c in registry.plugin_creator_list
if c.plugin_namespace == 'torch2trt_dynamic'
]
for c in torch2trt_creators:
registry.register_creator(c, 'torch2trt_dynamic')
try:
load_plugins()
PLUGINS_LOADED = True
except OSError:
PLUGINS_LOADED = False
```
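A minimal sketch of how a caller might react to the `PLUGINS_LOADED` flag set above; it only assumes the package import name matches the directory layout shown here:
```python
import torch2trt_dynamic
# If libtorch2trt_dynamic.so was not found next to __init__.py, plugin-backed
# converters (unfold, meshgrid, embedding, flip, ...) will not be registered.
if not torch2trt_dynamic.PLUGINS_LOADED:
    print('torch2trt_dynamic plugins not loaded; plugin-based converters are unavailable')
```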
#### File: torch2trt_dynamic/plugins/create_meshgrid_plugin.py
```python
import numpy as np
import tensorrt as trt
def create_meshgrid_plugin(layer_name,
num_inputs,
slice_dims=[2, 3],
starts=[0., 0.],
strides=[1., 1.]):
creator = trt.get_plugin_registry().get_plugin_creator(
'MeshGridPluginDynamic', '1', '')
pfc = trt.PluginFieldCollection()
pf_num_inputs = trt.PluginField(
'num_inputs', np.array([int(num_inputs)], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_num_inputs)
pf_slice_dims = trt.PluginField('slice_dims',
np.array(slice_dims, dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_slice_dims)
pf_starts = trt.PluginField('starts', np.array(starts, dtype=np.float32),
trt.PluginFieldType.FLOAT32)
pfc.append(pf_starts)
pf_strides = trt.PluginField('strides',
np.array(strides, dtype=np.float32),
trt.PluginFieldType.FLOAT32)
pfc.append(pf_strides)
return creator.create_plugin(layer_name, pfc)
```
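A sketch of how a converter would consume this helper, mirroring the `add_plugin_v2` pattern in the unfold converter above; `network` and `input_trt` are assumed to come from an active conversion context:
```python
# Assumed to run inside a converter where network (ctx.network) and input_trt already exist.
plugin = create_meshgrid_plugin('meshgrid_0', num_inputs=1,
                                slice_dims=[2, 3], starts=[0., 0.], strides=[1., 1.])
layer = network.add_plugin_v2(inputs=[input_trt], plugin=plugin)
output_trt = layer.get_output(0)
```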
#### File: torch2trt_dynamic/plugins/create_torchembedding_plugin.py
```python
import numpy as np
import tensorrt as trt
def create_torchembedding_plugin(layer_name, weight):
creator = trt.get_plugin_registry().get_plugin_creator(
'TorchEmbeddingPluginDynamic', '1', '')
pfc = trt.PluginFieldCollection()
num_embeddings = weight.shape[0]
embedding_dim = weight.shape[1]
pf_num_embeddings = trt.PluginField(
'num_embeddings', np.array([num_embeddings], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_num_embeddings)
pf_embedding_dim = trt.PluginField(
'embedding_dim', np.array([embedding_dim], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_embedding_dim)
return creator.create_plugin(layer_name, pfc)
```
#### File: torch2trt_dynamic/plugins/create_torchflip_plugin.py
```python
import numpy as np
import tensorrt as trt
def create_torchflip_plugin(layer_name, dims):
creator = trt.get_plugin_registry().get_plugin_creator(
'TorchFlipPluginDynamic', '1', '')
pfc = trt.PluginFieldCollection()
pf_dims = trt.PluginField('dims', np.array(dims, dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_dims)
return creator.create_plugin(layer_name, pfc)
```
#### File: torch2trt_dynamic/torch2trt_dynamic/shape_converter.py
```python
import torch
def get_tensor_shape(self):
return self.size()
old_get_attribute = torch.Tensor.__getattribute__
def new_getattribute__(self, name):
if name == 'shape':
return get_tensor_shape(self)
else:
return old_get_attribute(self, name)
class ShapeConverter:
def __init__(self):
pass
def __enter__(self):
torch.Tensor.__getattribute__ = new_getattribute__
def __exit__(self, type, val, tb):
torch.Tensor.__getattribute__ = old_get_attribute
``` |
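A small usage sketch: inside the context manager, `Tensor.shape` lookups are routed through `size()`, and the original attribute lookup is restored on exit (the import path is an assumption based on the file location):
```python
import torch
from torch2trt_dynamic.shape_converter import ShapeConverter  # assumed import path
t = torch.zeros(2, 3)
with ShapeConverter():
    print(t.shape)  # served by get_tensor_shape(), i.e. t.size()
print(t.shape)      # normal attribute lookup restored
```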
{
"source": "JinfengChen/CNVRepeat",
"score": 2
} |
#### File: src/CNVRepeat/main.py
```python
from __future__ import print_function
import argparse
import collections
import json
import logging
import sys
import os
from CNVRepeat import log
from CNVRepeat import options as opts
from CNVRepeat import pipeline
from CNVRepeat import analysis
def load_config(config_path):
try:
config = json.load(open(config_path))
except ValueError as err:
print("Error parsing configuration file '{}': '{}'\n Check that this is a properly formatted JSON file!".format(config_path, err))
sys.exit(1)
options = opts.Options.deserialize(config, config_path)
return options
def run(options):
analysis_steps = prepare_analysis(options)
runner = pipeline.Runner(options)
print("Running")
for analysis_name, analysis_step in analysis_steps.items():
print ('Running analysis: "{}"'.format(analysis_name))
runner.run_stage(analysis_step, analysis_name)
def prepare_analysis(options):
analysis_steps = collections.OrderedDict()
if options.method == 'single_copy_exon':
if not os.path.exists(options.bed) or not os.path.splitext(options.bed)[1] == '.bed':
analysis_steps["Single Copy Exon"] = analysis.single_copy_exon.SingleCopyExonStep
analysis_steps["Genome Coverage Estimator"] = analysis.estimate_genome_coverage_bed.EstimateGenomeCoverageStep
analysis_steps["Genome Coverage Merger"] = analysis.estimate_genome_coverage_bed.CombineGenomeCoverageStep
    elif options.method == 'random_region':
if not os.path.exists(options.bed) or not os.path.splitext(options.bed)[1] == '.bed':
analysis_steps["Random Region"] = analysis.single_copy_exon.RandomRegionStep
analysis_steps["Genome Coverage Estimator"] = analysis.estimate_genome_coverage_bed.EstimateGenomeCoverageStep
analysis_steps["Genome Coverage Merger"] = analysis.estimate_genome_coverage_bed.CombineGenomeCoverageStep
elif options.method == 'goleft':
analysis_steps["Genome Coverage Estimator Goleft"] = analysis.estimate_genome_coverage_bed.EstimateGenomeCoverageGoleftStep
analysis_steps["Repaet Coverage Estimator"] = analysis.estimate_repeat_coverage.EstimateRepeatCoverageStep
analysis_steps["Repeat Copy Number"] = analysis.estimate_repeat_copy_number.EstimateRepeatCopyNumberStep
return analysis_steps
def main():
parser = argparse.ArgumentParser(description="CNVRepeat: estimate copy number of repeat sequence in the genome")
parser.add_argument("--config", help="Path to configuration.json file")
parser.add_argument("--local", action="store_true", help="run job locally in multiprocess mode")
parser.add_argument("--scheduler", help="run job using scheduler, SLURM, SGE, PBS/Torque")
parser.add_argument("--cpu", default=1, help="number of cpu")
parser.add_argument("--method", default='goleft', help="method for estimation of genome coverage: goleft, single_copy_exon, random_region")
parser.add_argument("--random_dna_length", default=1000, help="length of DNA for random selection of method random_region")
parser.add_argument("--random_dna_number", default=100000, help="number of DNA for random selection of method random_region")
parser.add_argument("--debug", action="store_true", help="run in debug mode")
args = parser.parse_args()
if len(sys.argv) <= 1:
parser.print_help()
sys.exit(1)
options = load_config(args.config)
options.debug = args.debug
options.method = args.method
options.random_dna_length = args.random_dna_length
options.random_dna_number = args.random_dna_number
log.log_command(options, sys.argv)
run(options)
if __name__ == '__main__':
main()
```
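A sketch of a typical run; the keys expected inside `configuration.json` are defined by the `options` module, which is not shown here, so treat its contents as an assumption:
```python
# Command line (what main() wires up):
#   python main.py --config configuration.json --method goleft --cpu 4
# Programmatic sketch using the functions defined above:
options = load_config('configuration.json')
options.method = 'goleft'
options.debug = False
run(options)
```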
#### File: src/CNVRepeat/utilities.py
```python
import collections
import errno
import os
import re
import string
import sys
class BinaryNotFoundError(Exception):
pass
def ensure_dir(directory):
try:
os.makedirs(directory)
except OSError as err:
if err.errno != errno.EEXIST:
raise
class cd:
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
return [int(text) if text.isdigit() else text.lower()
for text in re.split(_nsre, s)]
def get_key(options_dict, key, type_=basestring, default="error", error_msg="configuration"):
if default == "error" and key not in options_dict:
print "CONFIG ERROR: {} key '{}' is missing".format(error_msg, key)
sys.exit(1)
value = options_dict.get(key, default)
if type_ is not None and not isinstance(value, type_):
print "CONFIG ERROR: {} key '{}' should be type '{}', not '{}'".format(
error_msg, key, type_.__name__, type(value).__name__)
sys.exit(1)
return value
comp = string.maketrans('ATCGatcg','TAGCtagc')
def revcomp(seq):
return seq[::-1].translate(comp)
def cpu_count_physical():
"""
tries to get the number of physical (ie not virtual) cores
"""
try:
import psutil
return psutil.cpu_count(logical=False)
except:
import multiprocessing
return multiprocessing.cpu_count()
def check_memory(logger, min_memory=16):
try:
import psutil
physical_mem_gb = psutil.virtual_memory().total / (1000.**3)
if physical_mem_gb < min_memory:
logger.log("WARNING: GROC-SVs typically requires ~16 GB of memory to run; "
"you appear to have only {:.1f}GB".format(physical_mem_gb))
except:
pass
``` |
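A few quick examples of the helpers above; the values in the comments are what the calls return:
```python
sorted(['chr10', 'chr2', 'chr1'], key=natural_sort_key)  # ['chr1', 'chr2', 'chr10']
which('samtools')                                        # full path if found on PATH, else None
revcomp('ATCGatcg')                                      # 'cgatCGAT'
```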
{
"source": "JinfengChen/RelocaTE2",
"score": 3
} |
#### File: RelocaTE2/scripts/clean_false_positive.py
```python
import sys
from collections import defaultdict
import re
import os
import argparse
from Bio import SeqIO  # needed by fasta_id()
def usage():
test="name"
message='''
    python Python.py --input ALL.all_nonref_insert.gff --refte ~/BigData/00.RD/RelocaTE_i/Simulation/Reference/MSU7.Chr3.fa.RepeatMasker.out.bed --bedtools bedtools
'''
print message
def fasta_id(fastafile):
fastaid = defaultdict(str)
for record in SeqIO.parse(fastafile,"fasta"):
fastaid[record.id] = 1
return fastaid
#Retro1 ACGTC not.give Chr4 14199..14203 - T:7 R:4 L:3 ST:21 SR:9 SL:12
def txt2gff(infile, outfile, ins_type):
#print infile, outfile
ofile = open(outfile, 'w')
count = 0
r_pos = re.compile(r'(\d+)\.\.(\d+)')
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
#print line
if len(line) > 2:
unit = re.split(r'\t',line)
count += 1
chro, start, end = ['', 0, 0]
chro = unit[3]
strand = unit[5]
te_name= unit[0]
m = r_pos.search(unit[4])
if m:
start = m.groups(0)[0]
end = m.groups(0)[1]
r_count = re.sub(r'\D+', '', unit[7])
l_count = re.sub(r'\D+', '', unit[8])
r_supp = re.sub(r'\D+', '', unit[10])
l_supp = re.sub(r'\D+', '', unit[11])
r_id = 'repeat_%s_%s_%s' %(chro, start, end)
print >> ofile, '%s\t%s\t%s\t%s\t%s\t.\t%s\t.\tID=%s;Name=%s;TSD=%s;Note=%s;Right_junction_reads=%s;Left_junction_reads=%s;Right_support_reads=%s;Left_support_reads=%s;' %(chro, 'RelocaTE2', unit[2], start, end, strand, r_id, te_name, unit[1], ins_type, r_count, l_count, r_supp, l_supp)
ofile.close()
#Chr3 not.give RelocaTE_i 283493 283504 . - . ID=repeat_Chr3_283493_283504;TSD=ATGCCATCAAGG;Note=Non-reference,
#not found in reference;Right_junction_reads:4;Left_junction_reads:1;Right_support_reads:4;Left_support_reads:5;
#Chr3 281479 284272 TE110112 +
def Overlap_TE_boundary(prefix, refte, distance, bedtools):
data = defaultdict(str)
final_gff = '%s.gff' %(prefix)
raw_gff = '%s.raw.gff' %(prefix)
all_gff = '%s.all.gff' %(prefix)
high_gff = '%s.high_conf.gff' %(prefix)
clean_gff = '%s.clean.gff' %(prefix)
infile = '%s.overlap' %(prefix)
outfile= '%s.remove.gff' %(prefix)
os.system('%s window -w %s -a %s -b %s > %s' %(bedtools, int(distance) + 10, final_gff, refte, infile))
if not os.path.isfile(infile) or not os.path.getsize(infile) > 0:
return 1
print 'filter existing TE: %s bp' %(distance)
ofile = open(outfile, 'w')
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2:
unit = re.split(r'\t',line)
temp = defaultdict(str)
attrs = re.split(r';', unit[8])
print line
for attr in attrs:
if not attr == '':
attr = re.sub(r':', '=', attr)
idx, value = re.split(r'\=', attr)
temp[idx] = value
if int(temp['Right_junction_reads']) == 0 or int(temp['Left_junction_reads']) == 0:
#support by one junction
#within 10 bp interval of intact TE boundary
#print >> ofile, '\t'.join(unit[:9])
if int(unit[3]) >= int(unit[10]) - int(distance) and int(unit[3]) <= int(unit[10]) + int(distance):
print >> ofile, '\t'.join(unit[:9])
elif int(unit[3]) >= int(unit[11]) - int(distance) and int(unit[3]) <= int(unit[11]) + int(distance):
print >> ofile, '\t'.join(unit[:9])
elif int(unit[4]) >= int(unit[10]) - int(distance) and int(unit[4]) <= int(unit[10]) + int(distance):
print >> ofile, '\t'.join(unit[:9])
elif int(unit[4]) >= int(unit[11]) - int(distance) and int(unit[4]) <= int(unit[11]) + int(distance):
print >> ofile, '\t'.join(unit[:9])
ofile.close()
if not os.path.isfile(outfile) or not os.path.getsize(outfile) > 0:
#nothing to remove
print 'nothing to remove'
os.system('mv %s %s' %(final_gff, raw_gff))
os.system('cp %s %s' %(raw_gff, all_gff))
os.system('grep -v \"singleton\|insufficient_data\|supporting_reads\" %s > %s' %(raw_gff, final_gff))
os.system('grep -v -e \"Right_junction_reads=1;Left_junction_reads=0\" -e \"Right_junction_reads=0;Left_junction_reads=1\" %s > %s' %(final_gff, high_gff))
os.system('rm %s.overlap %s.remove.gff' %(prefix, prefix))
else:
print 'remove by bedtool'
os.system('%s intersect -v -a %s -b %s > %s' %(bedtools, final_gff, outfile, clean_gff))
os.system('mv %s %s' %(final_gff, raw_gff))
os.system('cp %s %s' %(clean_gff, all_gff))
os.system('grep -v \"singleton\|insufficient_data\|supporting_reads\" %s > %s' %(clean_gff, final_gff))
os.system('grep -v -e \"Right_junction_reads=1;Left_junction_reads=0\" -e \"Right_junction_reads=0;Left_junction_reads=1\" %s > %s' %(final_gff, high_gff))
os.system('rm %s.overlap %s.remove.gff %s.clean.gff' %(prefix, prefix, prefix))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-r', '--refte')
parser.add_argument('-d', '--distance', default='3', type=int)
parser.add_argument('-b', '--bedtools')
parser.add_argument('-o', '--output')
parser.add_argument('-v', dest='verbose', action='store_true')
args = parser.parse_args()
try:
len(args.input) > 0 and len(args.refte) > 0
except:
usage()
sys.exit(2)
#if not os.path.isfile(args.input) or not os.path.getsize(args.input) > 0:
#txt2gff('%s.txt' %(os.path.splitext(args.input)[0]), args.input, 'non_reference')
#os.system('bedtools window -w 10 -a %s -b %s > %s.overlap' %(args.input, args.refte, os.path.splitext(args.input)[0]))
Overlap_TE_boundary(os.path.splitext(args.input)[0], args.refte, args.distance, args.bedtools)
if __name__ == '__main__':
main()
```
#### File: RelocaTE2/scripts/clean_pairs_memory.py
```python
import sys
from collections import defaultdict
import re
import os
import argparse
import glob
def usage():
test="name"
message='''
python clean_pairs_memory.py --1 fastq1 --2 fastq2 --repeat path/te_containing_fq --fq_dir original_fq_dir --seqtk pathtoseqtk > unPaired.fq
    Takes pairs of fastq files, which are trimmed from repeat blat results separately, and finds their mates in each other, in the TE_containing fastqs and in the original fastqs.
    *.matched: contains reads that have mates in either the trimmed fastq or the original fastq (whether to include these in TE_containing needs testing).
    *.unPaired.fq: contains trimmed reads for which no mate was found, and the mate pairs of reads that matched to the middle of a repeat (only if the mate pair is not a repeat); the middle-matching reads themselves are excluded as they are part of the repeat (whether to include these mates in TE_containing needs testing).
'''
print message
#we only need these reads matched to TE but not used as flanking_fq
#store only these reads not labeled with @read_500_403/1:middle or @read_500_403/1:end:5
#in some case read id in pairs are the same, not labeled with /1 or /2, or .r/.f
def parse_fastq(fq_file):
    data = defaultdict(lambda: str)
#s = re.compile(r':(middle|start|end)')
s1= re.compile(r'(\S+)(\.[rf])')
s2= re.compile(r'(\S+)(\/[12])')
with open (fq_file, 'r') as filehd:
for line in filehd:
line = line.rstrip()
header = re.split(r' ', line[1:])[0]
read_t = '1'
#if s.search(header):
# continue
m1 = s1.search(header)
m2 = s2.search(header)
if m1:
header = m1.groups(0)[0]
read_t = m1.groups(0)[1]
elif m2:
header = m2.groups(0)[0]
read_t = m2.groups(0)[1]
#header = re.sub(r'\/.*', '', header)
seq = filehd.next().rstrip()
qualh = filehd.next().rstrip()
qual = filehd.next().rstrip()
data[header] = read_t
#print header, read_t
return data
def parse_fastq_flanking(fq_file):
s = re.compile(r'(.*):(middle|start|end)')
s1= re.compile(r'(\S+)\.[rf]')
s2= re.compile(r'(\S+)\/[12]')
fq_dict = defaultdict(lambda : list)
with open (fq_file, 'r') as filehd:
for line in filehd:
line = line.rstrip()
#header = line[1:]
#header_to_store = header
header = re.split(r' ', line[1:])[0]
m = s.search(header)
header_to_store = m.groups(0)[0] if m else header
pos = m.groups(0)[1] if m else 'unknown'
#print '%s\t%s' %(header_to_store, pos)
m1 = s1.search(header)
m2 = s2.search(header)
if m1:
header_to_store = m1.groups(0)[0]
elif m2:
header_to_store = m2.groups(0)[0]
#print header_to_store, pos
#header = re.sub(r'\/.*', '', header)
seq = filehd.next().rstrip()
qualh = filehd.next().rstrip()
qual = filehd.next().rstrip()
record = '@%s\n%s\n%s\n%s' %(header, seq, qualh, qual)
fq_dict[header_to_store] = [pos, record, header]
#print fq_dict[header_to_store][0], fq_dict[header_to_store][1]
return fq_dict
def parse_fastq_default(fq_file):
s1= re.compile(r'(\S+)\.[rf]')
s2= re.compile(r'(\S+)\/[12]')
fq_dict = defaultdict(lambda : str)
with open (fq_file, 'r') as filehd:
for line in filehd:
line = line.rstrip()
#header = line[1:]
header = re.split(r' ', line[1:])[0]
header_to_store = header
m1 = s1.search(header)
m2 = s2.search(header)
if m1:
header_to_store = m1.groups(0)[0]
elif m2:
header_to_store = m2.groups(0)[0]
#header = re.sub(r'\/.*', '', header)
seq = filehd.next().rstrip()
qualh = filehd.next().rstrip()
qual = filehd.next().rstrip()
record = '@%s\n%s\n%s\n%s' %(header, seq, qualh, qual)
fq_dict[header_to_store] = record
return fq_dict
def match_trimmed(fq1, fq2, fq1_match, fq2_match, fq_unPaired, fq_unPaired_info):
fq1_dict = parse_fastq_flanking(fq1)
fq2_dict = parse_fastq_flanking(fq2)
ofile1 = open(fq1_match, 'w')
ofile2 = open(fq2_match, 'w')
ofile3 = open(fq_unPaired, 'w')
ofile4 = open(fq_unPaired_info, 'w')
#only deal with :end/start here, keep unpaired middle in dictionary
for hd in sorted(fq2_dict.keys()):
#print hd
if not fq2_dict[hd][0] == 'middle':
#fq2 is not middle
#print hd
if fq1_dict.has_key(hd) and fq1_dict[hd][0] != 'middle':
#paired and both matched to end of repeat, same end or different end?
##these informations should be recorded and used as supporting reads
#print '1'
print >> ofile1, fq1_dict[hd][1]
print >> ofile2, fq2_dict[hd][1]
del fq1_dict[hd]
del fq2_dict[hd]
elif fq1_dict.has_key(hd) and fq1_dict[hd][0] == 'middle':
#paired but mate in fq1 is middle, write fq2 to unpaired and delete both
#print '2'
print >> ofile3, fq2_dict[hd][1]
print >> ofile4, '%s\t%s' %(fq2_dict[hd][2], 2)
del fq1_dict[hd]
del fq2_dict[hd]
else:
#not paired in trimmed fastq. do nothing, keep id and find pair in orignal fastq
#print '3'
pass
else:
#fq2 is middle
if fq1_dict.has_key(hd) and fq1_dict[hd][0] != 'middle':
#paired but mate in fq2 is middle, write fq1 to unpaired and delete both
#these informations should be recorded and used as supporting reads
print >> ofile3, fq1_dict[hd][1]
print >> ofile4, '%s\t%s' %(fq1_dict[hd][2], 1)
del fq1_dict[hd]
del fq2_dict[hd]
elif fq1_dict.has_key(hd) and fq1_dict[hd][0] == 'middle':
#paired but both in middle, useless for insertion delete both
del fq1_dict[hd]
del fq2_dict[hd]
pass
else:
#not paired in trimmed fastq, do nothing, keep id and find pair in orignal fastq
pass
ofile1.close()
ofile2.close()
ofile3.close()
ofile4.close()
#return dictionary which includes unpaired middles and unpaired end/start
return (fq1_dict, fq2_dict)
def match_support(fq1_dict, fq2_0, fq2_te, fq1_match, fq2_match, fq_unPaired, fq_unPaired_info, read_flag, fq1_id_temp, fq2_0_temp, seqtk):
ofile1 = open(fq1_match, 'a')
ofile2 = open(fq2_match, 'a')
ofile3 = open(fq_unPaired, 'a')
ofile5 = open(fq_unPaired_info, 'a')
#deal with mates in te_containing fastq
fq2_te_dict = parse_fastq(fq2_te)
for hd in sorted(fq1_dict.keys()):
if fq1_dict[hd][0] == 'middle':
if fq2_te_dict.has_key(hd):
#reads have their mate matched to repeat but not at start/end or middle
#delete reads, useless for insertion
del fq1_dict[hd]
else:
if fq2_te_dict.has_key(hd):
#reads have their mate matched to repeat but not at start/end or middle
#reads matched to start/end, write to unPaired and delete from fq1
print >> ofile3, fq1_dict[hd][1]
print >> ofile5, '%s\t%s' %(fq1_dict[hd][2], read_flag)
del fq1_dict[hd]
#deal with mates in original fastq
    #finding the mate of a read in a large fastq or bam file is slow; the only practical way to speed this up is to extract clipped reads
    #and their mates, plus improperly mapped reads and their mates, into a fastq and start from there.
if fq2_te_dict.values()[0] == '1':
ofile4 = open(fq1_id_temp, 'w')
for hd in sorted(fq1_dict.keys()):
print >> ofile4, hd
ofile4.close()
else:
ofile4 = open(fq1_id_temp, 'w')
for hd in sorted(fq1_dict.keys()):
print >> ofile4, '%s%s' %(hd, fq2_te_dict.values()[0])
ofile4.close()
#get subset of fastq from original fastq
#cmd = 'seqtk subseq %s %s > %s' %(fq2_0, fq1_id_temp, fq2_0_temp)
#print cmd
os.system('%s subseq %s %s > %s' %(seqtk, fq2_0, fq1_id_temp, fq2_0_temp))
fq2_0_temp_dict = parse_fastq_default(fq2_0_temp)
#os.system('rm %s %s' %(fq1_id_temp, fq2_0_temp))
#write paired and unPaired reads
for hd in sorted(fq1_dict.keys()):
if fq1_dict[hd][0] == 'middle':
if fq2_0_temp_dict.has_key(hd):
##use mates as supporting reads
print >> ofile3, fq2_0_temp_dict[hd]
else:
##no mates found, useless. impossible if input is paired
pass
else:
if fq2_0_temp_dict.has_key(hd):
##paired, write both
print >> ofile1, fq1_dict[hd][1]
print >> ofile2, fq2_0_temp_dict[hd]
else:
##no mates found, write unpaired. impossible if input is paired
print >> ofile3, fq1_dict[hd][1]
ofile1.close()
ofile2.close()
ofile3.close()
ofile5.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-1', '--fq1')
parser.add_argument('-2', '--fq2')
parser.add_argument('-r', '--repeat')
parser.add_argument('-f', '--fq_dir')
parser.add_argument('-s', '--seqtk')
parser.add_argument('-v', dest='verbose', action='store_true')
args = parser.parse_args()
try:
len(args.fq1) > 0 and len(args.fq2) > 0 and len(args.repeat) and len(args.fq_dir)
except:
usage()
sys.exit(2)
fastqs = glob.glob('%s/*.f*q*' %(args.fq_dir))
suffix = ''
s = re.compile(r'(\.f\w*?q.*?$)')
if s.search(fastqs[0]):
suffix = s.search(fastqs[0]).groups(0)[0]
fq1_te = '%s/%s' %(args.repeat, re.sub(r'.flankingReads.fq', r'.ContainingReads.fq', os.path.split(args.fq1)[1]))
fq2_te = '%s/%s' %(args.repeat, re.sub(r'.flankingReads.fq', r'.ContainingReads.fq', os.path.split(args.fq2)[1]))
fq1_0 = '%s/%s' %(args.fq_dir, re.sub(r'.te_repeat.flankingReads.fq', r'%s' %(suffix), os.path.split(args.fq1)[1]))
fq2_0 = '%s/%s' %(args.fq_dir, re.sub(r'.te_repeat.flankingReads.fq', r'%s' %(suffix), os.path.split(args.fq2)[1]))
fq1_match = '%s.matched' %(args.fq1)
fq2_match = '%s.matched' %(args.fq2)
fq_unPaired = '%s.unPaired.fq' %(os.path.splitext(args.fq1)[0])
fq_unPaired_info = '%s.unPaired.info' %(os.path.splitext(args.fq1)[0])
#print '%s\n%s\n%s\n%s' %(args.fq1, fq1_te, fq1_0, fq_unPaired)
#print '%s\n%s\n%s\n%s' %(args.fq2, fq2_te, fq2_0, fq_unPaired)
#write pairs that exists in trimmed files (*.flankingReads.fq) to *.matched
#write pairs that have their mates in trimmed files, but are middles, to *.unPaired.fq
#Return the id of left reads to get their mates from original fastq
fq1_dict, fq2_dict = match_trimmed(args.fq1, args.fq2, fq1_match, fq2_match, fq_unPaired, fq_unPaired_info)
#write pairs that have their mates in te_containing fastq in unPaired
#write pairs get from trimmed and original fastq to *.matched
fq1_id_temp = '%s.fq1_id_temp.list' %(os.path.splitext(args.fq1)[0])
fq2_0_temp = '%s.fq2_0_temp.fq' %(os.path.splitext(args.fq2)[0])
#print '%s\n%s' %(fq1_id_temp, fq2_0_temp)
match_support(fq1_dict, fq2_0, fq2_te, fq1_match, fq2_match, fq_unPaired, fq_unPaired_info, 1, fq1_id_temp, fq2_0_temp, args.seqtk)
fq2_id_temp = '%s.fq2_id_temp.list' %(os.path.splitext(args.fq2)[0])
fq1_0_temp = '%s.fq1_0_temp.fq' %(os.path.splitext(args.fq1)[0])
#print '%s\n%s' %(fq2_id_temp, fq1_0_temp)
match_support(fq2_dict, fq1_0, fq1_te, fq2_match, fq1_match, fq_unPaired, fq_unPaired_info, 2, fq2_id_temp, fq1_0_temp, args.seqtk)
if __name__ == '__main__':
main()
``` |
{
"source": "jinfeng/jinfeng-pants-fork",
"score": 2
} |
#### File: backend/maven_layout/maven_layout.py
```python
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.codegen.targets.java_antlr_library import JavaAntlrLibrary
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.python_antlr_library import PythonAntlrLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.targets.doc import Page
from pants.backend.core.targets.resources import Resources
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.benchmark import Benchmark
from pants.backend.jvm.targets.java_agent import JavaAgent
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.java_tests import JavaTests
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_tests import PythonTests
from pants.base.source_root import SourceRoot
def maven_layout(parse_context, basedir=''):
"""Sets up typical maven project source roots for all built-in pants target types.
Shortcut for ``source_root('src/main/java', *java targets*)``,
``source_root('src/main/python', *python targets*)``, ...
:param string basedir: Instead of using this BUILD file's directory as
the base of the source tree, use a subdirectory. E.g., instead of
expecting to find java files in ``src/main/java``, expect them in
``**basedir**/src/main/java``.
"""
def root(path, *types):
SourceRoot.register_mutable(os.path.join(parse_context.rel_path, basedir, path), *types)
root('src/main/antlr', JavaAntlrLibrary, Page, PythonAntlrLibrary)
root('src/main/java', AnnotationProcessor, JavaAgent, JavaLibrary, JvmBinary, Page, Benchmark)
root('src/main/protobuf', JavaProtobufLibrary, Page)
root('src/main/python', Page, PythonBinary, PythonLibrary)
root('src/main/resources', Page, Resources)
root('src/main/scala', JvmBinary, Page, ScalaLibrary, Benchmark)
root('src/main/thrift', JavaThriftLibrary, Page, PythonThriftLibrary)
root('src/test/java', JavaLibrary, JavaTests, Page, Benchmark)
root('src/test/python', Page, PythonLibrary, PythonTests)
root('src/test/resources', Page, Resources)
root('src/test/scala', JavaTests, Page, ScalaLibrary, Benchmark)
```
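A sketch of how this is used from a BUILD file; the `parse_context` argument is supplied by the BUILD-file machinery, so callers omit it:
```python
# In a BUILD file at the root of a Maven-style project:
maven_layout()
# Or, when the Maven tree lives one level deeper than this BUILD file:
maven_layout(basedir='subproject')
```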
#### File: project_info/tasks/projectutils.py
```python
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import defaultdict
def get_jar_infos(ivy_products, confs=None):
"""Returns a list of dicts containing the paths of various jar file resources.
Keys include 'default' (normal jar path), 'sources' (path to source jar), and 'javadoc'
(path to doc jar). None of them are guaranteed to be present, but 'sources' and 'javadoc'
will never be present if 'default' isn't.
:param ivy_products: ivy_jar_products data from a context
:param confs: List of key types to return (eg ['default', 'sources']). Just returns 'default' if
left unspecified.
:returns mapping of IvyModuleRef --> {'default' : [<jar_filenames>],
'sources' : [<jar_filenames>],
'javadoc' : [<jar_filenames>]}
"""
confs = confs or ['default']
classpath_maps = defaultdict(dict)
if ivy_products:
for conf, info_group in ivy_products.items():
if conf not in confs:
continue # We don't care about it.
for info in info_group:
for module in info.modules_by_ref.values():
if module.artifacts:
classpath_maps[module.ref][conf] = [artifact.path for artifact in module.artifacts]
return classpath_maps
```
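A sketch of calling `get_jar_infos` from a task; the `context` object and the `'ivy_jar_products'` products key are assumptions about the surrounding task code:
```python
ivy_products = context.products.get_data('ivy_jar_products')  # assumed products key
jar_infos = get_jar_infos(ivy_products, confs=['default', 'sources'])
for module_ref, paths_by_conf in jar_infos.items():
    print(module_ref, paths_by_conf.get('default', []), paths_by_conf.get('sources', []))
```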
#### File: python/tasks/python_task.py
```python
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import tempfile
from contextlib import contextmanager
from pex.pex_builder import PEXBuilder
from twitter.common.collections import OrderedSet
from pants.backend.core.tasks.task import Task
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.python_chroot import PythonChroot
from pants.backend.python.python_setup import PythonRepos, PythonSetup
from pants.base.exceptions import TaskError
class PythonTask(Task):
@classmethod
def global_subsystems(cls):
return super(PythonTask, cls).global_subsystems() + (PythonSetup, PythonRepos)
def __init__(self, *args, **kwargs):
super(PythonTask, self).__init__(*args, **kwargs)
self._compatibilities = self.get_options().interpreter or [b'']
self._interpreter_cache = None
self._interpreter = None
@property
def interpreter_cache(self):
if self._interpreter_cache is None:
self._interpreter_cache = PythonInterpreterCache(PythonSetup.global_instance(),
PythonRepos.global_instance(),
logger=self.context.log.debug)
# Cache setup's requirement fetching can hang if run concurrently by another pants proc.
self.context.acquire_lock()
try:
# We pass in filters=compatibilities because setting up some python versions
# (e.g., 3<=python<3.3) crashes, and this gives us an escape hatch.
self._interpreter_cache.setup(filters=self._compatibilities)
finally:
self.context.release_lock()
return self._interpreter_cache
@property
def interpreter(self):
"""Subclasses can use this if they're fine with the default interpreter (the usual case)."""
if self._interpreter is None:
self._interpreter = self.select_interpreter(self._compatibilities)
return self._interpreter
def select_interpreter_for_targets(self, targets):
"""Pick an interpreter compatible with all the specified targets."""
allowed_interpreters = OrderedSet(self.interpreter_cache.interpreters)
targets_with_compatibilities = [] # Used only for error messages.
# Constrain allowed_interpreters based on each target's compatibility requirements.
for target in targets:
if target.is_python and hasattr(target, 'compatibility') and target.compatibility:
targets_with_compatibilities.append(target)
compatible_with_target = list(self.interpreter_cache.matches(target.compatibility))
allowed_interpreters &= compatible_with_target
if not allowed_interpreters:
# Create a helpful error message.
unique_compatibilities = set(tuple(t.compatibility) for t in targets_with_compatibilities)
unique_compatibilities_strs = [','.join(x) for x in unique_compatibilities if x]
targets_with_compatibilities_strs = [str(t) for t in targets_with_compatibilities]
raise TaskError('Unable to detect a suitable interpreter for compatibilities: {} '
'(Conflicting targets: {})'.format(' && '.join(unique_compatibilities_strs),
', '.join(targets_with_compatibilities_strs)))
# Return the lowest compatible interpreter.
return self.interpreter_cache.select_interpreter(allowed_interpreters)[0]
def select_interpreter(self, filters):
"""Subclasses can use this to be more specific about interpreter selection."""
interpreters = self.interpreter_cache.select_interpreter(
list(self.interpreter_cache.matches(filters)))
if len(interpreters) != 1:
raise TaskError('Unable to detect a suitable interpreter.')
interpreter = interpreters[0]
self.context.log.debug('Selected {}'.format(interpreter))
return interpreter
@contextmanager
def temporary_chroot(self, interpreter=None, pex_info=None, targets=None,
extra_requirements=None, platforms=None, pre_freeze=None):
"""Yields a temporary PythonChroot created with the specified args.
pre_freeze is an optional function run on the chroot just before freezing its builder,
to allow for any extra modification.
"""
path = tempfile.mkdtemp()
builder = PEXBuilder(path=path, interpreter=interpreter, pex_info=pex_info)
with self.context.new_workunit('chroot'):
chroot = PythonChroot(
context=self.context,
python_setup=PythonSetup.global_instance(),
python_repos=PythonRepos.global_instance(),
targets=targets,
extra_requirements=extra_requirements,
builder=builder,
platforms=platforms,
interpreter=interpreter)
chroot.dump()
if pre_freeze:
pre_freeze(chroot)
builder.freeze()
yield chroot
chroot.delete()
```
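A sketch of how a concrete subclass might use `temporary_chroot`; everything beyond the methods defined above is an assumption about the surrounding task:
```python
class MyPythonTask(PythonTask):
    def execute(self):
        targets = self.context.targets(lambda t: t.is_python)
        with self.temporary_chroot(interpreter=self.interpreter, targets=targets) as chroot:
            # The chroot has been dumped and its PEX builder frozen at this point;
            # it is deleted automatically when the block exits.
            self.context.log.info('Built chroot for {} targets'.format(len(targets)))
```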
#### File: pants_test/python/test_resolver.py
```python
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pex.platforms import Platform
from pants.backend.python.resolver import get_platforms
class ResolverTest(unittest.TestCase):
def test_get_current_platform(self):
expected_platforms = [Platform.current(), 'linux-x86_64']
self.assertEqual(set(expected_platforms),
set(get_platforms(['current', 'linux-x86_64'])))
``` |
{
"source": "jinfenglin/free-proxy",
"score": 3
} |
#### File: free-proxy/fp/fp.py
```python
import random
import sys
import lxml.html as lh
import requests
class FreeProxy:
def __init__(self, country_id=[], timeout=0.5,
protocol_type=('http', 'https'),
anonymity=('anonymous', 'transparent', 'elite proxy'),
update_time=15 * 60,
rand=False):
self.country_id = country_id
self.timeout = timeout
self.random = rand
self.anonymity = anonymity
self.update_time = update_time
self.protocol_type = protocol_type
def update_time_to_seconds(self, update_time_str):
parts = update_time_str.split()
num = int(parts[0])
unit = parts[1]
if 'minute' in unit:
num *= 60
return num
def is_valid(self, country_id, https, update_time, anonymity):
if self.country_id and country_id not in self.country_id:
return False
if https not in self.protocol_type:
return False
if anonymity not in self.anonymity:
return False
if update_time > self.update_time:
return False
return True
def get_proxy_list(self):
try:
page = requests.get('https://www.sslproxies.org')
doc = lh.fromstring(page.content)
tr_elements = doc.xpath('//*[@id="proxylisttable"]//tr')
proxies = []
for i in range(1, 101):
ip = tr_elements[i][0].text_content()
port = tr_elements[i][1].text_content()
country_id = tr_elements[i][2].text_content()
anonymity = tr_elements[i][4].text_content()
https = 'https' if tr_elements[i][6].text_content() == 'yes' else 'http'
update_time_sec = self.update_time_to_seconds(tr_elements[i][7].text_content())
if self.is_valid(country_id, https, update_time_sec, anonymity):
proxy_url = {https: "{}://{}:{}".format(https, ip, port)}
proxies.append(proxy_url)
return proxies
except requests.exceptions.RequestException as e:
print(e)
sys.exit(1)
def get(self):
proxy_list = self.get_proxy_list()
if self.random:
random.shuffle(proxy_list)
        working_proxy = None
        for proxies in proxy_list:
            try:
                # Probe each candidate once and return the first proxy that responds.
                working_proxy = self.check_if_proxy_is_working(proxies)
                if working_proxy:
                    return working_proxy
except requests.exceptions.RequestException:
continue
if not working_proxy:
if self.country_id is not None:
self.country_id = None
return self.get()
else:
return 'There are no working proxies at this time.'
def check_if_proxy_is_working(self, proxies):
with requests.get('http://www.google.com', proxies=proxies, timeout=self.timeout, stream=True) as r:
if r.raw.connection.sock:
return list(proxies.values())[0]
``` |
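Typical usage of the class above; `get()` returns either a proxy URL string or the no-working-proxies message:
```python
from fp.fp import FreeProxy
proxy = FreeProxy(country_id=['US', 'GB'], timeout=1.0, rand=True).get()
print(proxy)  # e.g. 'https://1.2.3.4:8080'
```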
{
"source": "jinfenglin/NLPBox",
"score": 3
} |
#### File: NLPBox/Cleaner/cleaner.py
```python
import re
from nltk import PorterStemmer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
stop_words = set(stopwords.words('english'))
def clean_phrase(phrase):
phrase = phrase.strip("\n\t\r ")
phrase = keep_only_given_chars(phrase, re_char_white_list="\w\-&\'\s")
phrase = merge_white_space(phrase)
return phrase
def merge_white_space(text):
text = re.sub("[^\S\r\n]+", " ", text)
return text
def remove_space_around_char(char="-", text=""):
"""
Remove the space around a char in the text, eg, text = "health - care" will become
health-care
:param text:
:param char:
:return:
"""
text = re.sub("\s*{}\s*".format(char), char, text)
return text
def keep_only_given_chars(text="", re_char_white_list="\w\-&\s\.\,\'\""):
"""
Remove all characters not in the white list.
    :param re_char_white_list: Regular expression character class of allowed chars
:param text:
:return:
"""
text = re.sub("[^{}]".format(re_char_white_list), " ", text)
return merge_white_space(text)
def esapce_sql_variable_quote(sql_variable):
"""
    SQL databases usually encode a literal single quote by doubling it.
:param sql_variable:
:return:
"""
return re.sub("\'", "\'\'", sql_variable)
def stem_tokens(tokens):
"""
Stem tokens
    :param tokens:
:return:
"""
porter_stemmer = PorterStemmer()
return [porter_stemmer.stem(x) for x in tokens]
def stem_string(str, regx_split_chars="[\s]"):
    str = merge_white_space(str)
tokens = re.split(regx_split_chars, str)
return stem_tokens(tokens)
def remove_stop_words(doc):
words = word_tokenize(doc)
words_filtered = []
for w in words:
if w.lower() not in stop_words:
words_filtered.append(w)
return " ".join(words_filtered)
def remove_content_in_bracket(doc):
p = re.compile("[\[\(\{][^\]\)\}]*[\]\)\}]")
doc = p.sub(" ", doc)
return doc
if __name__ == "__main__":
print(remove_content_in_bracket("a[b]c[d]"))
```
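A short cleaning pipeline built from the functions above (the NLTK stopword and tokenizer data must already be downloaded for `remove_stop_words` to work):
```python
raw = "  Health -   care [legacy] systems "
text = remove_content_in_bracket(raw)           # drops "[legacy]"
text = clean_phrase(text)                       # "Health - care systems"
text = remove_space_around_char("-", text)      # "Health-care systems"
print(remove_stop_words(text))                  # no stop words here, so unchanged
print(stem_string("requirements engineering"))  # ['requir', 'engin']
```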
#### File: Scraper/src/ProxyPool.py
```python
import platform
import re
import threading
from time import sleep
from urllib3 import make_headers, ProxyManager
from common import DATA_DIR
import subprocess, os
class ProxyPool:
def __init__(self, proxy_list_file):
self.credit_record = {}
self.waiting_round = {}
self.proxy_list = self.__read_proxy_list(proxy_list_file)
self.available_proxy = set()
def __read_proxy_list(self, file_path):
with open(file_path) as fin:
for line in fin:
proxy_url = line.strip("\n\t\r ")
self.credit_record[proxy_url] = 0
self.waiting_round[proxy_url] = 0
return self.credit_record.keys()
def is_alive_proxy(self, proxy):
host = self.get_ip(proxy)
if platform.system() == "Windows":
command = "ping {} -n 1".format(host)
else:
command = "ping {} -c 1".format(host)
        # With shell=True the command must be passed as a single string.
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
proc.wait()
isUpBool = False
if proc.returncode == 0:
if self.can_get_response("http://www.example.org", timeout=10, proxy=proxy):
isUpBool = True
return isUpBool
def can_get_response(self, link, timeout, proxy):
try:
header = "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7"
headers = make_headers(user_agent=header)
http = ProxyManager(proxy, headers=headers)
response = http.request("GET", link, timeout=timeout)
status_code = response.status
if str(status_code).startswith("2"):
return True
else:
return False
except Exception as e:
return False
def get_ip(self, url):
ip_pattern = r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b'
return re.search(ip_pattern, url).group(0)
def update_proxy_list(self, interval=10):
while True:
for proxy in self.proxy_list:
penalty_degree = self.credit_record[proxy]
remain_waiting = self.waiting_round[proxy]
if remain_waiting > 0:
self.waiting_round[proxy] -= 1
continue
is_live_flag = False
try:
is_live_flag = self.is_alive_proxy(proxy)
except Exception as e:
print(e)
if is_live_flag:
if penalty_degree > 0:
self.credit_record[proxy] -= 1
self.available_proxy.add(proxy)
else:
self.credit_record[proxy] += 1
self.waiting_round[proxy] = min(100, remain_waiting + self.credit_record[proxy])
if proxy in self.available_proxy:
self.available_proxy.remove(proxy)
sleep(interval)
def run(self):
t = threading.Thread(target=self.update_proxy_list, )
t.start()
if __name__ == "__main__":
proxy_path = os.path.join(DATA_DIR, "proxy_list.txt")
proxy_pool = ProxyPool(proxy_path)
proxy_pool.run()
```
#### File: Scraper/src/scrape_document_main.py
```python
from mission import Mission, Sqlite3Manger
from scrap_query import ScrapQuery
from scraper import GoogleScraperWraper
from common import *
import json
import wikipedia
from web_page_parser import StackOverflowParser, QuoraParser, PcMagParser, RegularParagraphParser, InnolutionParser
import logging
def load_terms(vocab_path):
terms = []
with open(vocab_path, encoding="utf8") as fin:
for line in fin.readlines():
term = line.strip("\n\t\r ")
terms.append(term)
return terms
def load_definitoin(definition_path):
definition = {}
with open(definition_path, encoding="utf8") as fin:
for line in fin.readlines():
line = line.strip("\n\t\r ")
split_point = line.rfind(":")
term = line[:split_point]
def_doc = line[split_point + 1:]
definition[term] = def_doc
return definition
def run_stackoverflow_mission(sql_db, terms, scraper, use_proxy, overriding_existing=True):
stk_querys = []
for term in terms:
scrap_query = ScrapQuery([term], template="what is \"{}\"", domain="stackoverflow.com")
stk_querys.append(scrap_query)
stk_parser = StackOverflowParser()
mission = Mission(sql_db, "stackoverflow", stk_querys, stk_parser, scraper, use_proxy)
mission.run(delay=0.2, thread_num=4, override_existing=overriding_existing)
def run_quora_mission(sql_db, terms, scraper, use_proxy, overriding_existing=True, topic="software engineering"):
quora_querys = []
for term in terms:
scrap_query = ScrapQuery([term], template="what is \"{}\" in " + topic, domain="quora.com")
quora_querys.append(scrap_query)
quora_parser = QuoraParser()
mission = Mission(sql_db, "quora", quora_querys, quora_parser, scraper, use_proxy)
mission.run(delay=0.2, thread_num=4, override_existing=overriding_existing)
def run_pcMag_mission(sql_db, terms, scraper, use_proxy, overriding_existing=False):
pcMag_queries = []
for term in terms:
scrap_query = ScrapQuery([term], template="definition of \"{}\"", domain="pcmag.com/encyclopedia/")
pcMag_queries.append(scrap_query)
pcMag_parser = PcMagParser()
mission = Mission(sql_db, "pcMag", pcMag_queries, pcMag_parser, scraper, use_proxy)
mission.run(delay=0, thread_num=4, link_limit=1, override_existing=overriding_existing)
def run_innolution_mission(sql_db, terms, scraper, use_proxy, overriding_existing=False):
innolution_queires = []
for term in terms:
scrap_query = ScrapQuery([term], template="\"{}\"", domain="innolution.com/resources/glossary")
innolution_queires.append(scrap_query)
innolution_parser = InnolutionParser()
mission = Mission(sql_db, "pcMag", innolution_queires, innolution_parser, scraper, use_proxy)
mission.run(delay=0.2, thread_num=4, link_limit=1, override_existing=overriding_existing)
def run_regularParse_mission(sql_db, terms, scraper, use_proxy, overriding_existing=True, topic="software engineering"):
regular_terms = []
for term in terms:
scrap_query = ScrapQuery([term], template="definition of \"{}\" in " + topic)
regular_terms.append(scrap_query)
regular_parser = RegularParagraphParser()
mission = Mission(sql_db, "regular", regular_terms, regular_parser, scraper, use_proxy)
mission.run(thread_num=4, override_existing=overriding_existing)
def run_wikipedia_parse_mission(sql_db, terms, override_existing=True):
"""
This function use wikipedia api to parse wikipedia document
:return:
"""
sqlite_manager = Sqlite3Manger(sql_db)
sqlite_manager.create_table("wiki")
wiki_dump = {}
for term in terms:
related_page_name = wikipedia.search(term)
page_infos = []
for page_name in related_page_name:
try:
page_info = {}
page_obj = wikipedia.page(page_name)
categories = page_obj.categories
if not __check_wiki_categories(categories):
continue
page_info["summary"] = page_obj.summary
page_info["categories"] = list(set(categories))
page_infos.append(page_info)
except wikipedia.exceptions.DisambiguationError as e:
first_option = e.options[0]
print("{} has ambiguity, try first option {}".format(page_name, first_option))
try:
page_info = {}
page_obj = wikipedia.page(first_option)
categories = page_obj.categories
if not __check_wiki_categories(categories):
continue
page_info["summary"] = page_obj.summary
page_info["categories"] = list(set(categories))
page_infos.append(page_info)
except Exception as e2:
print("First option failed due to {}".format(e))
except Exception as other_e:
print("Exception {}".format(other_e))
wiki_dump[term] = page_infos
for term in wiki_dump:
if override_existing:
sqlite_manager.add_or_update_row("wiki", term, json.dumps(wiki_dump[term]))
else:
sqlite_manager.add_if_not_exist("wiki", term, json.dumps(wiki_dump[term]))
sqlite_manager.conn.commit()
def __check_wiki_categories(categories):
for category in categories:
if __valid_category(category):
return True
return False
def __valid_category(category):
white_list = ["software development", "software engineering", "requirement engineering", "agile development",
"computing", "programming"]
for white_list_item in white_list:
if white_list_item in category:
return True
return False
def run_add_definition_from_file(sql_db, definition_dict):
logger = logging.getLogger("__name__")
logger.info("Start Import definitions ...")
sqlite_manager = Sqlite3Manger(sql_db)
sqlite_manager.create_table("pcMag")
for term in definition_dict:
def_doc = definition_dict[term]
db_dump = [{"term": term, "definition": def_doc}]
sqlite_manager.add_or_update_row("pcMag", term, json.dumps(db_dump))
sqlite_manager.conn.commit()
logger.info("Finished importing definition")
if __name__ == "__main__":
proxies = os.path.join(DATA_DIR, "proxy_list.txt")
sql_db = os.path.join(DATA_DIR, "term_definitions.db")
vocab_path = os.path.join(DATA_DIR, "expansion_on_fly.txt")
scraper = GoogleScraperWraper(proxies)
mode = "add_words"
if mode == "add_definition":
# Add extra vocabulary and definition into the database.
glossory_data_dir = os.path.join(PROJECT_ROOT, "..", "GlossaryProcess", "data")
for dir_name in os.listdir(glossory_data_dir):
dir_path = os.path.join(glossory_data_dir, dir_name)
topic = dir_name.replace("_", " ")
if os.path.isdir(dir_path):
for file_name in os.listdir(dir_path):
file_path = os.path.join(dir_path, file_name)
definition_dict = load_definitoin(file_path)
terms = list(definition_dict.keys())
run_add_definition_from_file(sql_db, definition_dict)
run_stackoverflow_mission(sql_db, terms, scraper, use_proxy=True)
run_quora_mission(sql_db, terms, scraper, use_proxy=True, topic=topic)
run_regularParse_mission(sql_db, terms, scraper, use_proxy=True, topic=topic)
                    run_wikipedia_parse_mission(sql_db, terms)
else:
if mode == "add_words":
terms = load_terms(vocab_path)
elif mode == "dry_run":
sql_db = "Test.db"
terms = ["Objective-C", "Scala", "Swift", "Shell", "TypeScript", "go", "C#", "CSS"]
# Build the database from scratch by give a list of vocabulary
#run_wikipedia_parse_mission(sql_db, terms, scraper)
run_pcMag_mission(sql_db, terms, scraper, use_proxy=True)
run_stackoverflow_mission(sql_db, terms, scraper, use_proxy=True)
run_quora_mission(sql_db, terms, scraper, use_proxy=True)
run_innolution_mission(sql_db, terms, scraper, use_proxy=True)
# run_regularParse_mission(sql_db, terms, scraper, use_proxy=True)
print("Finished...")
```
#### File: Scraper/src/scraper.py
```python
from GoogleScraper import scrape_with_config, GoogleSearchError
from ProxyPool import ProxyPool
from common import *
import shutil
import urllib
import time
from urllib3 import ProxyManager, make_headers, disable_warnings, exceptions
import random
import logging
from sql_db_manager import Sqlite3Manger
class GoogleScraperWraper:
"""
    A Scraper wrapper, which can
    1. Scrape links for a list of queries
    2. Retrieve the HTML page for a link
    Current implementation is single-threaded due to the sqlite backend.
"""
def __init__(self, proxy_file=""):
self.user_agent_header = "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7"
disable_warnings(exceptions.InsecureRequestWarning)
self.default_scraper_db = "google_scraper.db"
self.default_cache_dir = ".scrapecache"
self.logger = logging.getLogger(__name__)
self.proxy_manger = ProxyPool(proxy_file)
self.proxy_manger.run()
def clear(self, clear_cache=True):
"""
Delete the sqlite database created by GoolgeScraper
:return:
"""
if os.path.isfile(self.default_scraper_db):
os.remove(self.default_scraper_db)
if clear_cache and os.path.isdir(self.default_cache_dir):
shutil.rmtree(self.default_cache_dir)
def scrap_links(self, query_str_list, search_engine=["bing"], page_num=12000, method="http", cache="True",
collect_link=True):
"""
Scraper for a list of queries and get the links as a result. Use search engines to scrap the links.
:param query_str_list:Queries in string format submitted to search engine
:param search_engine: See GoogleScraper package for more information
:param page_num: The number of page to scrap
:param method: Use http for most case
:param cache: Use cache
:return: A dictionary whose key is the query string, and the value is the links
"""
query_set = set(query_str_list)
config = {
'use_own_ip': 'True',
'keywords': query_set,
'search_engines': search_engine,
'num_pages_for_keyword': page_num,
'scrape_method': method,
'do_caching': cache
}
res = {}
if collect_link:
try:
db_session = scrape_with_config(config)
except GoogleSearchError as e:
self.logger.exception("Scraper Error:", e)
print("{} serps to process...".format(db_session.serps))
for serp in db_session.serps:
query = serp.query
if query not in res:
res[query] = set()
for link in serp.links:
res[query].add(link.link)
else:
sql_db_manger = Sqlite3Manger("google_scraper.db")
links = sql_db_manger.get_rows_for_table("link", ['link', 'serp_id', 'title'])
serps = sql_db_manger.get_rows_for_table('serp', ['id', 'query'])
serp_query_dict = {}
for serp in serps:
serp_id = serp[0]
query = serp[1]
serp_query_dict[serp_id] = query
for link in links:
link_url = link[0]
serp_id = link[1]
query = serp_query_dict[serp_id]
if query not in res:
res[query] = set()
res[query].add(link_url)
return res
def __request(self, link, timeout):
headers = make_headers(user_agent=self.user_agent_header)
request = urllib.request.Request(link, None, headers)
with urllib.request.urlopen(request, timeout=timeout) as url:
html_page = url.read()
return html_page
def __request_with_proxy(self, link, timeout, proxies):
headers = make_headers(user_agent=self.user_agent_header)
proxy_ip = random.sample(proxies, 1)[0]
http = ProxyManager(proxy_ip, headers=headers)
response = http.request("GET", link, timeout=timeout)
return response.data
def get_html_for_a_link(self, link, delay=0.1, timeout=10, use_proxy=False):
"""
Retrieve the html page for a link.
:param link:
:param delay:
:param timeout:
:return:
"""
if delay > 0:
time.sleep(delay)
res = ""
try:
proxies = self.proxy_manger.available_proxy
if len(proxies) > 0 and use_proxy:
try:
res = self.__request_with_proxy(link, timeout, proxies)
except Exception as proxy_e:
res = self.__request(link, timeout)
self.logger.exception("Request with proxy exception:", proxy_e)
else:
res = self.__request(link, timeout)
except Exception as e:
self.logger.exception("Exceptions in Scraping link {} :{}".format(link, e))
if not res:
res = ""
return res
if __name__ == "__main__":
proxies = os.path.join(DATA_DIR, "proxy_list.txt")
gsw = GoogleScraperWraper(proxies)
keywords = ["apple", "google inc", "maven"]
res_dict = gsw.scrap_links(keywords)
for k in res_dict:
for link in res_dict[k]:
            print(link)
            print(gsw.get_html_for_a_link(link, use_proxy=True))
print("------------------")
```
#### File: Scraper/src/sql_db_manager.py
```python
import sys
sys.path.append("../../Cleaner")
import sqlite3
import cleaner
import logging, threading
class Sqlite3Manger:
def __init__(self, sqlite_file, commit_period=50):
self.conn = sqlite3.connect(sqlite_file)
self.logger = logging.getLogger(__name__)
self.lock = threading.Lock()
self.execute_count = 0
self.commit_period = commit_period
def __del__(self):
try:
self.conn.commit() # commit result before quiting
self.conn.close()
except Exception as e:
self.logger.exception("Close Db Exception:{}".format(e))
def __execute(self, sql):
"""
Synchronized function
:param sql:
:return:
"""
try:
with self.lock:
c = self.conn.cursor()
res = c.execute(sql)
self.execute_count += 1
if self.execute_count > self.commit_period:
self.logger.debug("Commit in database")
self.execute_count = 0
self.conn.commit()
return res
except Exception as e:
self.logger.info("Error with executing sql:", sql)
self.logger.exception(e)
def create_table(self, table_name):
table_name = cleaner.esapce_sql_variable_quote(table_name)
sql = "CREATE TABLE IF NOT EXISTS {} (query text PRIMARY KEY, content text);".format(table_name)
self.__execute(sql)
def drop_table(self, table_name):
table_name = cleaner.esapce_sql_variable_quote(table_name)
sql = "DROP TABLE IF EXISTS {};".format(table_name)
self.__execute(sql)
def add_or_update_row(self, table_name, query, content):
table_name = cleaner.esapce_sql_variable_quote(table_name)
query = cleaner.esapce_sql_variable_quote(query)
content = cleaner.esapce_sql_variable_quote(content)
sql = "INSERT OR REPLACE INTO {} VALUES (\'{}\',\'{}\')".format(table_name, query, content)
self.__execute(sql)
def add_if_not_exist(self, table_name, query, content):
table_name = cleaner.esapce_sql_variable_quote(table_name)
query = cleaner.esapce_sql_variable_quote(query)
content = cleaner.esapce_sql_variable_quote(content)
sql = "INSERT OR IGNORE INTO {} VALUES (\'{}\',\'{}\')".format(table_name, query, content)
self.__execute(sql)
def get_rows_for_table(self, table_name, colums):
table_name = cleaner.esapce_sql_variable_quote(table_name)
columns = ",".join(colums)
sql = "SELECT {} FROM {}".format(columns,table_name)
return self.__execute(sql)
def get_content_for_query(self, query):
"""
Find parsed information from all tables for a single query
:param query:
:return: A dictionary contains doc parsed from different source. The content is a json string
"""
type_content = {}
sql_get_all_tables = "SELECT name FROM sqlite_master WHERE type = 'table'"
table_names = self.__execute(sql_get_all_tables)
for name_row in table_names:
table_name = cleaner.esapce_sql_variable_quote(name_row[0])
query = cleaner.esapce_sql_variable_quote(query)
type_content[table_name] = ""
sql = "SELECT content FROM {} WHERE query = \'{}\'".format(table_name, query)
for content_row in self.__execute(sql):
content = content_row[0]
if not content:
content = ""
type_content[table_name] = content
return type_content
if __name__ == "__main__":
sqlM = Sqlite3Manger("google_scraper.db")
rows = sqlM.get_rows_for_table("link", ['link', 'serp_id', 'title'])
for row in rows:
print(row)
```
#### File: mergeRQ1.1ANDRQ1.2BySelectNonHeuristic(OriginVerisionIncluded)/not_filter_result/filter_result.py
```python
from nltk.stem.porter import PorterStemmer
import os
def __stem_Tokens(words):
porter_stemmer = PorterStemmer()
return [porter_stemmer.stem(x) for x in words.split(" ")]
def same_pre_post(tokens1, tokens2):
if tokens1[0] == tokens2[0] or tokens1[-1] == tokens2[-1]:
return True
return False
def single_token_same_pre_post_fix(tokens1, tokens2):
if len(tokens1) == 1 and len(tokens2) == 1:
w1 = tokens1[0]
w2 = tokens2[0]
if len(w1) > 3 and len(w2) > 3:
return w1[:3] == w2[:3] or w1[-3:] == w2[-3:]
return False
def share_tokens(tokens1, tokens2):
for tk1 in tokens1:
for tk2 in tokens2:
if tk1 == tk2:
return True
return False
def is_heuristic_ones(w1, w2):
w1_stems = __stem_Tokens(w1)
w2_stems = __stem_Tokens(w2)
if same_pre_post(w1_stems, w2_stems):
return True
return False
for file_name in os.listdir("."):
if not os.path.isfile(file_name) or (not file_name.endswith("txt") and not file_name.endswith("csv")):
continue
# file_name = "FeedForward_Result{}.txt".format(i)
tn = 0
tp = 0
fn = 0
fp = 0
with open(file_name) as fin, open("../filter_result/{}".format(file_name), "w") as fout, open(
"../filter_result/csv/{}".format(file_name), "w") as csv_fout:
cnt = 0
for line in fin:
cnt += 1
line = line.strip("\n")
if "label, correctness, w1, w2" in line:
if cnt == 1:
continue
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = (tp + tn) / (tp + tn + fn + fp)
csv_fout.write("{},{},{},{}\n".format(recall, precision, f1, accuracy))
tn = 0
tp = 0
fn = 0
fp = 0
else:
parts = [x for x in line.split("\t") if len(x) > 0]
if len(parts) < 5:
print(parts)
continue
pre_label = parts[0]
correctness = parts[1]
score = parts[2]
w1 = parts[3]
w2 = parts[4]
if is_heuristic_ones(w1, w2):
continue
if correctness == "Correct":
if pre_label == "yes":
tp += 1
else:
tn += 1
else:
if pre_label == "yes":
fp += 1
else:
fn += 1
fout.write(line + "\n")
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = (tp + tn) / (tp + tn + fn + fp)
csv_fout.write("{},{},{},{}\n".format(recall, precision, f1, accuracy))
```
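A quick sanity check of the heuristic filter above; the word pairs are invented, and the snippet assumes the functions are available in the same session with NLTK installed:
```python
# Pairs whose stemmed forms share a first/last token count as "heuristic" and are dropped.
print(is_heuristic_ones("computing", "computer"))  # expected True  -> filtered out
print(is_heuristic_ones("graph", "network"))       # expected False -> kept for evaluation
```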
#### File: NLPBox/Utils/dict_utils.py
```python
def collection_to_index_dict(collection):
"""
Given a collection, give each type of the data a unique integer id
:param collection:
:return: a diction project object to a number
"""
res = {}
for i, entry in enumerate(collection):
res[entry] = i
return res
def invert_dict(origin_dict):
inv_map = {v: k for k, v in origin_dict.items()}
return inv_map
```
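A minimal usage sketch for the two helpers above; the sample collection and the module name are assumptions:
```python
from dict_utils import collection_to_index_dict, invert_dict

vocab = ["cat", "dog", "bird"]
word_to_id = collection_to_index_dict(vocab)  # {'cat': 0, 'dog': 1, 'bird': 2}
id_to_word = invert_dict(word_to_id)          # {0: 'cat', 1: 'dog', 2: 'bird'}
assert id_to_word[word_to_id["dog"]] == "dog"
```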
#### File: NLPBox/Utils/output_utils.py
```python
import logging
def write_csv(res, writer):
res = [str(x) for x in res]
content = ",".join(res)
try:
writer.write(content + "\n")
except Exception as e:
logging.getLogger(__name__).info(content)
``` |
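A small usage sketch for `write_csv`; an in-memory buffer stands in for a real file handle, and the import path is assumed from the file location:
```python
from io import StringIO
from output_utils import write_csv

buf = StringIO()
write_csv([1, "a", 3.5], buf)
print(buf.getvalue())  # "1,a,3.5\n"
```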
{
"source": "JinFree/tf_trt_models",
"score": 3
} |
#### File: JinFree/tf_trt_models/convert_to_FP16_Classification.py
```python
import cv2
import sys
import os
import urllib
import tensorflow as tf
import tensorflow.contrib.tensorrt as trt
import numpy as np
from tf_trt_models.classification import download_classification_checkpoint, build_classification_graph
PB32_PATH = './SE-ResNeXt.pb'
CHECKPOINT_PATH = './ckpt_n_pb/SE_ResNeXt_epoch_26.ckpt'
NUM_CLASSES = 1000
LABELS_PATH = './imagenet_labels.txt'
output_node_names = ['Softmax']
input_node_names = ['Placeholder']
def open_pb(PB_PATH):
graph_def = tf.GraphDef()
graph = tf.Graph()
with tf.gfile.GFile(PB_PATH, 'rb') as fid:
graph_def.ParseFromString(fid.read())
return graph, graph_def
_, frozen_graph = open_pb(PB32_PATH)
input_names = input_node_names
output_names = output_node_names
trt_graph = trt.create_inference_graph(
input_graph_def=frozen_graph,
outputs=output_names,
max_batch_size=1,
max_workspace_size_bytes=1 << 25,
precision_mode='FP16',
minimum_segment_size=50
)
with open('./SE-ResNeXt_16.pb', 'wb') as f:
f.write(trt_graph.SerializeToString())
print('FP16 conversion complete')
``` |
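A hedged follow-up sketch showing how the converted FP16 graph could be loaded for inference with the same TF1-style API; the 224×224×3 input shape is an assumption about the SE-ResNeXt placeholder, and the snippet reuses `tf`, `np`, `open_pb`, and the node names defined above:
```python
graph, graph_def = open_pb('./SE-ResNeXt_16.pb')
with graph.as_default():
    tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=graph) as sess:
        x = graph.get_tensor_by_name('Placeholder:0')
        y = graph.get_tensor_by_name('Softmax:0')
        # Dummy input; replace with a real preprocessed image batch.
        scores = sess.run(y, feed_dict={x: np.zeros((1, 224, 224, 3), np.float32)})
        print(scores.shape)
```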
{
"source": "jinfwhuang/tractor",
"score": 2
} |
#### File: python/farm_ng/blobstore.py
```python
import logging
import os
import pathlib
import google.protobuf.json_format as json_format
from farm_ng_proto.tractor.v1.resource_pb2 import Bucket
logger = logging.getLogger('blobstore')
logger.setLevel(logging.INFO)
class InvalidBucketException(Exception):
pass
class Blobstore:
def __init__(self):
self.root = os.getenv('BLOBSTORE_ROOT')
if (not self.root):
raise Exception('BLOBSTORE_ROOT not set.')
def read_protobuf_from_json_file(self, path, message):
self._check_valid_path(path)
with open(os.path.join(self.root, path)) as f:
json_format.Parse(f.read(), message)
def read_protobuf_from_binary_file(self, path, message):
self._check_valid_path(path)
        with open(os.path.join(self.root, path), 'rb') as f:  # binary mode: ParseFromString expects bytes
message.ParseFromString(f.read())
def bucket_relative_path(self, bucket_id):
name = Bucket.Name(bucket_id)
return name[len('BUCKET_'):].lower()
def _check_valid_path(self, path):
valid_buckets = [self.bucket_relative_path(id) for id in Bucket.values()]
target_bucket = pathlib.Path(path).parts[0]
if target_bucket not in valid_buckets:
raise InvalidBucketException(f'Invalid bucket: {target_bucket}')
def _write_protobuf_to_json_file(self, path, message):
raise NotImplementedError()
def _write_protobuf_to_binary_file(self, path, message):
raise NotImplementedError()
def write_protobuf_as_resource(self, path, message, serialization='json'):
raise NotImplementedError()
def read_protobuf_from_resource(self, resource):
raise NotImplementedError()
```
#### File: python/farm_ng/config.py
```python
import argparse
import os
from google.protobuf.json_format import MessageToJson
from farm_ng.blobstore import Blobstore
from farm_ng_proto.tractor.v1.apriltag_pb2 import ApriltagConfig
from farm_ng_proto.tractor.v1.apriltag_pb2 import TagConfig
from farm_ng_proto.tractor.v1.resource_pb2 import BUCKET_CONFIGURATIONS
from farm_ng_proto.tractor.v1.tractor_pb2 import TractorConfig
def _in2m(inches: float) -> float:
return 0.0254*inches
class TractorConfigManager:
@staticmethod
def saved():
blobstore = Blobstore()
config = TractorConfig()
blobstore.read_protobuf_from_json_file(
os.path.join(blobstore.bucket_relative_path(BUCKET_CONFIGURATIONS), 'tractor.json'),
config,
)
return config
@staticmethod
def default():
config = TractorConfig()
config.wheel_baseline.value = _in2m(42.0)
config.wheel_radius.value = _in2m(17/2.0)
config.hub_motor_gear_ratio.value = 29.9
config.hub_motor_poll_pairs.value = 8
config.topology = TractorConfig.Topology.TOPOLOGY_TWO_MOTOR_DIFF_DRIVE
return config
class ApriltagConfigManager:
@staticmethod
def saved():
blobstore = Blobstore()
config = ApriltagConfig()
blobstore.read_protobuf_from_json_file(
os.path.join(blobstore.bucket_relative_path(BUCKET_CONFIGURATIONS), 'apriltag.json'),
config,
)
return config
@staticmethod
def default():
config = ApriltagConfig()
config.tag_library.tags.extend([TagConfig(id=id, size=0.16) for id in range(0, 10)])
return config
def gentractor(args):
print(MessageToJson(TractorConfigManager.default(), including_default_value_fields=True))
def genapriltag(args):
print(MessageToJson(ApriltagConfigManager.default(), including_default_value_fields=True))
def main():
parser = argparse.ArgumentParser(epilog='e.g. python -m farm_ng.config gentractor > $BLOBSTORE_ROOT/configurations/tractor.json')
subparsers = parser.add_subparsers()
gentractor_parser = subparsers.add_parser('gentractor')
gentractor_parser.set_defaults(func=gentractor)
genapriltag_parser = subparsers.add_parser('genapriltag')
genapriltag_parser.set_defaults(func=genapriltag)
list_parser = subparsers.add_parser('list')
list_parser.set_defaults(
func=(
lambda args: print(' '.join([c[3:] for c in subparsers.choices.keys() if c.startswith('gen')]))
),
)
args = parser.parse_args()
if not hasattr(args, 'func'):
parser.print_help()
return
args.func(args)
if __name__ == '__main__':
main()
```
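A minimal sketch of inspecting the defaults produced by `TractorConfigManager.default()`; it assumes the `farm_ng` package and its protobuf definitions are importable:
```python
from farm_ng.config import TractorConfigManager

config = TractorConfigManager.default()
print(config.wheel_baseline.value)        # 42 in -> ~1.067 m
print(config.wheel_radius.value)          # 8.5 in -> ~0.216 m
print(config.hub_motor_gear_ratio.value)  # 29.9
```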
#### File: python/farm_ng/tractor.py
```python
import bisect
import logging
import sys
from collections import deque
import numpy as np
from google.protobuf.text_format import MessageToString
from google.protobuf.timestamp_pb2 import Timestamp
from liegroups import SE3
from farm_ng.canbus import CANSocket
from farm_ng.config import TractorConfigManager
from farm_ng.controller import TractorMoveToGoalController
from farm_ng.ipc import EventBus
from farm_ng.ipc import get_event_bus
from farm_ng.ipc import make_event
from farm_ng.kinematics import TractorKinematics
from farm_ng.motor import HubMotor
from farm_ng.periodic import Periodic
from farm_ng.proto_utils import proto_to_se3
from farm_ng.proto_utils import se3_to_proto
from farm_ng.steering import SteeringClient
from farm_ng_proto.tractor.v1.geometry_pb2 import NamedSE3Pose
from farm_ng_proto.tractor.v1.steering_pb2 import SteeringCommand
from farm_ng_proto.tractor.v1.tractor_pb2 import TractorConfig
from farm_ng_proto.tractor.v1.tractor_pb2 import TractorState
logger = logging.getLogger('tractor')
logger.setLevel(logging.INFO)
class TimeSeriesItem:
def __init__(self, stamp, message):
self.message = message
self.stamp = stamp
def __lt__(self, other):
return int(self.stamp.ToMicroseconds()) < int(other.stamp.ToMicroseconds())
class TimeSeries:
def __init__(self, time_window=10.0):
self._items = deque()
self._time_window = time_window
def push(self, message, stamp):
bisect.insort(self._items, TimeSeriesItem(stamp, message))
if(self._items[-1].stamp.ToMicroseconds() - self._items[0].stamp.ToMicroseconds())*1e-6 > self._time_window:
self._items.popleft()
def find_nearest(self, stamp):
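        # bisect_left against a sentinel item gives the insertion point for `stamp`;
        # stepping back one index returns the most recent entry recorded before it.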
item = self._items[bisect.bisect_left(self._items, TimeSeriesItem(stamp, None))-1]
return item.message, item.stamp
class TractorController:
def __init__(self, event_bus: EventBus):
self.command_rate_hz = 50
self.command_period_seconds = 1.0 / self.command_rate_hz
self.n_cycle = 0
# self.record_counter = 0
# self.recording = False
self.event_bus = event_bus
self.event_bus.add_subscriptions(['pose/tractor/base/goal'])
self.event_bus.add_event_callback(self._on_event)
self.lock_out = False
self.can_socket = CANSocket('can0', self.event_bus.event_loop())
self.steering = SteeringClient(self.event_bus)
self.tractor_state = TractorState()
self.odom_poses_tractor = TimeSeries(1.0)
self.odom_pose_tractor = SE3.identity()
self.config = TractorConfigManager.saved()
self.kinematics = TractorKinematics(tractor_config=self.config)
self.move_to_goal_controller = TractorMoveToGoalController()
radius = self.config.wheel_radius.value
gear_ratio = self.config.hub_motor_gear_ratio.value
poll_pairs = self.config.hub_motor_poll_pairs.value
self.right_motor = HubMotor(
'right_motor',
radius, gear_ratio, poll_pairs, 7, self.can_socket,
)
self.left_motor = HubMotor(
'left_motor',
radius, gear_ratio, poll_pairs, 9, self.can_socket,
)
if self.config.topology == TractorConfig.TOPOLOGY_FOUR_MOTOR_SKID_STEER:
logger.info('Four Motor Skid Steer Mode')
self.right_motor_aft = HubMotor(
'right_motor_aft',
radius, gear_ratio, poll_pairs, 8, self.can_socket,
)
self.left_motor_aft = HubMotor(
'left_motor_aft',
radius, gear_ratio, poll_pairs, 10, self.can_socket,
)
self.control_timer = Periodic(
self.command_period_seconds, self.event_bus.event_loop(),
self._command_loop, name='control_loop',
)
self._last_odom_stamp = None
def _on_event(self, event):
if event.name == 'pose/tractor/base/goal':
pose = NamedSE3Pose()
event.data.Unpack(pose)
assert pose.frame_a == 'tractor/base'
odom_pose_tractor, stamp = self.odom_poses_tractor.find_nearest(event.stamp)
# now = Timestamp()
# now.GetCurrentTime()
# logger.info('Got goal: %f delayed', (now.ToMicroseconds() - event.stamp.ToMicroseconds())*1e-6)
tractor_pose_goal = proto_to_se3(pose.a_pose_b)
odom_pose_goal = odom_pose_tractor.dot(tractor_pose_goal)
self.move_to_goal_controller.set_goal(odom_pose_goal)
def _command_velocity(self, v, w):
self.tractor_state.target_unicycle_velocity = v
self.tractor_state.target_unicycle_angular_velocity = w
left, right = self.kinematics.unicycle_to_wheel_velocity(
self.tractor_state.target_unicycle_velocity,
self.tractor_state.target_unicycle_angular_velocity,
)
self.tractor_state.commanded_brake_current = 0
self.tractor_state.commanded_wheel_velocity_rads_left = left
self.tractor_state.commanded_wheel_velocity_rads_right = right
self.right_motor.send_velocity_command_rads(right)
self.left_motor.send_velocity_command_rads(left)
if self.config.topology == TractorConfig.TOPOLOGY_FOUR_MOTOR_SKID_STEER:
self.right_motor_aft.send_velocity_command_rads(right)
self.left_motor_aft.send_velocity_command_rads(left)
def _servo(self, steering_command: SteeringCommand):
vel = max(steering_command.velocity, 0)
v, w = self.move_to_goal_controller.update(self.odom_pose_tractor, vel)
# logger.info('servoing: %f %f %f', vel, v, w)
self._command_velocity(v, w)
def _command_loop(self, n_periods):
# n_periods is the number of periods since the last call. Should normally be 1.
now = Timestamp()
now.GetCurrentTime()
if (self.n_cycle % (5*self.command_rate_hz)) == 0:
logger.info(
'\nright motor:\n %s\nleft motor:\n %s\n state:\n %s',
MessageToString(self.right_motor.get_state(), as_one_line=True),
MessageToString(self.left_motor.get_state(), as_one_line=True),
MessageToString(self.tractor_state, as_one_line=True),
)
self.tractor_state.stamp.CopyFrom(now)
self.tractor_state.wheel_velocity_rads_left = self.left_motor.velocity_rads()
self.tractor_state.wheel_velocity_rads_right = self.right_motor.velocity_rads()
self.tractor_state.average_update_rate_left_motor = self.left_motor.average_update_rate()
self.tractor_state.average_update_rate_right_motor = self.right_motor.average_update_rate()
if self.config.topology == TractorConfig.TOPOLOGY_FOUR_MOTOR_SKID_STEER:
self.tractor_state.wheel_veolcity_rads_left_aft = self.left_motor_aft.velocity_rads()
self.tractor_state.wheel_veolcity_rads_right_aft = self.right_motor_aft.velocity_rads()
self.tractor_state.average_update_rate_left_aft_motor = self.left_motor_aft.average_update_rate()
self.tractor_state.average_update_rate_right_aft_motor = self.right_motor_aft.average_update_rate()
if self._last_odom_stamp is not None:
dt = (now.ToMicroseconds() - self._last_odom_stamp.ToMicroseconds())*1e-6
min_dt = 0.0
max_dt = 1.0 # 1 second
if dt < min_dt or dt > max_dt:
# this condition can occur if n_periods skipped is high
# or negative if for some reason the clock is non-monotonic - TODO(ethanrublee) should we use a monotonic clock?
                    logger.warning('odometry time delta out of bounds, clipping. n_period=%d dt=%f min_dt=%f max_dt=%f', n_periods, dt, min_dt, max_dt)
self.tractor_state.dt = np.clip(dt, min_dt, max_dt)
tractor_pose_delta = self.kinematics.compute_tractor_pose_delta(
self.tractor_state.wheel_velocity_rads_left,
self.tractor_state.wheel_velocity_rads_right,
self.tractor_state.dt,
)
self.odom_pose_tractor = self.odom_pose_tractor.dot(tractor_pose_delta)
self.odom_poses_tractor.push(self.odom_pose_tractor, now)
self.tractor_state.abs_distance_traveled += np.linalg.norm(tractor_pose_delta.trans)
self.tractor_state.odometry_pose_base.a_pose_b.CopyFrom(se3_to_proto(self.odom_pose_tractor))
self.tractor_state.odometry_pose_base.frame_a = 'odometry/wheel'
self.tractor_state.odometry_pose_base.frame_b = 'tractor/base'
self.event_bus.send(
make_event(
'pose/tractor/base',
self.tractor_state.odometry_pose_base, stamp=now,
),
)
self._last_odom_stamp = now
self.n_cycle += 1
brake_current = 10.0
steering_command = self.steering.get_steering_command()
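        # Braking takes priority over every steering mode: command a brake current,
        # zero the velocity targets, and reset the move-to-goal controller.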
if steering_command.brake > 0.0:
self.tractor_state.commanded_brake_current = brake_current
self.tractor_state.commanded_wheel_velocity_rads_left = 0.0
self.tractor_state.commanded_wheel_velocity_rads_right = 0.0
self.tractor_state.target_unicycle_velocity = 0.0
self.tractor_state.target_unicycle_angular_velocity = 0.0
self.right_motor.send_current_brake_command(brake_current)
self.left_motor.send_current_brake_command(brake_current)
if self.config.topology == TractorConfig.TOPOLOGY_FOUR_MOTOR_SKID_STEER:
self.right_motor_aft.send_current_brake_command(brake_current)
self.left_motor_aft.send_current_brake_command(brake_current)
self.move_to_goal_controller.reset()
elif steering_command.mode in (SteeringCommand.MODE_SERVO,):
self._servo(steering_command)
elif steering_command.mode in (SteeringCommand.MODE_JOYSTICK_MANUAL, SteeringCommand.MODE_JOYSTICK_CRUISE_CONTROL):
self._command_velocity(steering_command.velocity, steering_command.angular_velocity)
self.event_bus.send(make_event('tractor_state', self.tractor_state))
def main():
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
event_bus = get_event_bus('farm_ng.tractor')
controller = TractorController(event_bus)
logger.info('Created controller %s', controller)
event_bus.event_loop().run_forever()
if __name__ == '__main__':
main()
``` |
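A standalone sketch of the `TimeSeries` helper above using synthetic protobuf timestamps; it assumes the class is importable from this module:
```python
from google.protobuf.timestamp_pb2 import Timestamp

ts = TimeSeries(time_window=10.0)
for i in range(5):
    stamp = Timestamp()
    stamp.FromMilliseconds(1000 * i)
    ts.push({'pose': i}, stamp)

query = Timestamp()
query.FromMilliseconds(2500)
message, stamp = ts.find_nearest(query)
print(message)  # {'pose': 2} -- the latest entry recorded before the query time
```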
{
"source": "jing-2020/FA-DenseNet",
"score": 3
} |
#### File: FA-DenseNet/example/example.py
```python
from scipy.io import loadmat
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset, random_split
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
import time
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import sys
from math import ceil
import optuna
torch.manual_seed(88)
torch.cuda.manual_seed(88)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class block(nn.Module):
def __init__(self, in_features, hidden1, hidden2, hidden3, out_features):
super(block, self).__init__()
self.short = nn.Sequential(
nn.BatchNorm1d(in_features),
nn.ReLU()
)
self.density = nn.Sequential(
nn.Linear(in_features, hidden1),
nn.ReLU(),
nn.Linear(hidden1, hidden2),
nn.ReLU(),
nn.Linear(hidden2, hidden3),
nn.ReLU(),
            nn.Linear(hidden3, out_features),
            nn.Dropout(0.5)
)
def forward(self, x):
x = self.density(self.short(x))
return x
class Fullnet(nn.Module):
def __init__(self, in_features, out_features,a,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15):
super(Fullnet, self).__init__()
self.input = nn.Linear(in_features, a)
self.block1 = block(a, a1, a2, a3, a)
self.block2 = block(a, a4, a5, a6, a)
self.block3 = block(a, a7, a8, a9, a)
self.block4 = block(a, a10, a11, a12, a)
self.block5 = block(a, a13, a14, a15, out_features)
self.output = nn.Softmax(dim=1)
def forward(self, x):
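        # Dense-style connectivity: each block consumes the running sum of the projected
        # input and all previous block outputs, so features are reused additively.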
x = self.input(x)
d1 = x
x = self.block1(d1)
d2 = x + d1
x = self.block2(d2)
d3 = x + d1 + d2
x = self.block3(d3)
d4 = x + d1 + d2 + d3
x = self.block4(d4)
d5 = x + d1 + d2 + d3 + d4
x = self.block5(d5)
x = self.output(x)
return x
class mydata(Dataset):
def __init__(self, file1, file2):
data=loadmat(file1)
data=data['P_abnormal']
q=pd.read_excel(file2,header=None)
q=q.values
data=data[:,[447, 314, 276, 327, 153, 111, 230, 0, 422, 79, 351, 75, 288, 410, 355, 365, 396, 160, 131, 456, 260, 34, 209, 55]]
label=np.zeros((data.shape[0],1),dtype=np.int64)
for j,i in enumerate(range(0,data.shape[0],24)):
label[i:(i+24),0]=q[j,0]-1
model=MinMaxScaler()
data=model.fit_transform(data)
self.data = data
self.data = self.data.astype(np.float32)
self.label=label
    def __getitem__(self, index):
        # Labels were already shifted to zero-based class indices in __init__ (q[j, 0] - 1),
        # so they are returned as-is; subtracting 1 again would produce an invalid class of -1.
        return self.data[index, :], self.label[index, 0]
def __len__(self):
return self.data.shape[0]
def top5(precision, true, device):
value, index = torch.topk(precision, 5, dim=1)
numbers = true.shape[0]
accuracy = torch.zeros(numbers).to(device)
for i in range(numbers):
if true[i] in index[i, :]:
accuracy[i] = 1
return (torch.sum(accuracy) / torch.Tensor([numbers]).to(device)).item()
def top1(precision, true, device):
index = torch.max(precision, 1)[1]
accuracy = sum(index == true) / torch.Tensor([true.shape[0]]).to(device)
return accuracy.item()
def objective(trial):
train_size = 0.8
BATCH_SIZE = 320
EPOCH = 100
LR = 0.001
a=trial.suggest_int("a",40,80)
a1=trial.suggest_int("a1",80,160)
a2 = trial.suggest_int("a2", 160, 320)
a3 = trial.suggest_int("a3", 80, 160)
a4=trial.suggest_int("a4",80,200)
a5 = trial.suggest_int("a5", 160, 480)
a6 = trial.suggest_int("a6", 80, 200)
a7=trial.suggest_int("a7",80,160)
a8 = trial.suggest_int("a8", 160, 320)
a9 = trial.suggest_int("a9", 80, 160)
a10=trial.suggest_int("a10",80,160)
a11 = trial.suggest_int("a11", 160, 320)
a12 = trial.suggest_int("a12", 80, 160)
a13=trial.suggest_int("a13",80,160)
a14 = trial.suggest_int("a14", 160, 320)
a15 = trial.suggest_int("a15", 80, 160)
Data = mydata(r"data.mat", r'area_id.xlsx')
train_size = int(len(Data) * train_size)
test_size = len(Data) - train_size
train_data, test_data = random_split(Data, [train_size, test_size])
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(test_data, batch_size=len(test_data))
model = Fullnet(24, 24,a,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
F_loss = torch.nn.CrossEntropyLoss()
for epoch in range(EPOCH):
for step, [x, y] in enumerate(train_loader):
x, y = x.to(device), y.to(device)
py = model(x)
loss = F_loss(py, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
model.eval()
for tx, ty in test_loader:
tx, ty = tx.to(device), ty.to(device)
pty = model(tx)
loss = F_loss(pty, ty)
accuracy_top1 = top1(pty, ty, device)
# accuracy_top5 = top5(pty, ty, device)
model.train()
trial.report(accuracy_top1, epoch)
if trial.should_prune():
raise optuna.exceptions.TrialPruned()
return accuracy_top1
if __name__ == "__main__":
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=100)
pruned_trials = [t for t in study.trials if t.state == optuna.trial.TrialState.PRUNED]
complete_trials = [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE]
print("Study statistics: ")
print(" Number of finished trials: ", len(study.trials))
print(" Number of pruned trials: ", len(pruned_trials))
print(" Number of complete trials: ", len(complete_trials))
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
# print(" Params: ")
# for key, value in trial.params.items():
# print(" {}: {}".format(key, value))
``` |
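A quick shape check of the `Fullnet` architecture above; the hidden sizes are arbitrary placeholders and the snippet assumes it runs in the same session as the definitions:
```python
import torch

hidden = [128, 256, 128] * 5                # a1 ... a15, illustrative values only
model = Fullnet(24, 24, 64, *hidden).to(device)
x = torch.randn(8, 24).to(device)
print(model(x).shape)                       # torch.Size([8, 24]); rows sum to 1 (Softmax)
```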
{
"source": "jing5460/189Cloud-Checkin",
"score": 2
} |
#### File: jing5460/189Cloud-Checkin/189Checkin.py
```python
import requests
import time
import re
import rsa
import base64
import hashlib
import os
import sys
sys.path.append('.')
requests.packages.urllib3.disable_warnings()
try:
from pusher import pusher
except:
pass
from urllib import parse
username = os.environ.get("username")
password = os.environ.get("password")
TGBOTAPI = os.environ.get("TGBOTAPI")
TGID = os.environ.get("TGID")
tianyi_session = requests.Session()
result = '🏆天翼云盘签到姬🏆\n'
def pushtg(data):
global TGBOTAPI
global TGID
requests.post(
'https://api.telegram.org/bot'+TGBOTAPI+'/sendMessage?chat_id='+TGID+'&text='+data)
    # [BOTAPI] format: 123456:abcdefghi
    # [TGID] format: 123456 (user) or -100123456 (group/channel)
if (username == "" or password == ""):
username = input("账号:")
password = input("密码:")
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 5.1.1; SM-G930K Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36 Ecloud/8.6.3 Android/22 clientId/355325117317828 clientModel/SM-G930K imsi/460071114317824 clientChannelId/qq proVersion/1.0.6',
"Referer": "https://m.cloud.189.cn/zhuanti/2016/sign/index.jsp?albumBackupOpened=1",
"Host": "m.cloud.189.cn",
"Accept-Encoding": "gzip, deflate",
}
def main():
msg = login(username, password)
if msg != "error":
checkin()
lottery(1)
lottery(2)
pushtg(result)
# Daily check-in
def checkin():
global result
rand = str(round(time.time() * 1000))
url = f'https://api.cloud.189.cn/mkt/userSign.action?rand={rand}&clientType=TELEANDROID&version=8.6.3&model=SM-G930K'
response = tianyi_session.get(url, headers=headers)
netdiskBonus = response.json()['netdiskBonus']
try:
if response.json()['isSign'] == "false":
result += f"🎉签到成功,获得了{netdiskBonus}M空间\n"
else:
result += f"🎉签到成功,获得了{netdiskBonus}M空间\n"
except Exception as e:
result += '🧨签到失败!'
# Prize draw
def lottery(few):
global result
url = ''
if few == 1:
url = 'https://m.cloud.189.cn/v2/drawPrizeMarketDetails.action?taskId=TASK_SIGNIN&activityId=ACT_SIGNIN'
if few == 2:
url = 'https://m.cloud.189.cn/v2/drawPrizeMarketDetails.action?taskId=TASK_SIGNIN_PHOTOS&activityId=ACT_SIGNIN'
response = tianyi_session.get(url, headers=headers)
if "errorCode" in response.text:
if response.json()['errorCode'] == "User_Not_Chance":
result += f"第{str(few)}次抽奖次数不足\n"
else:
result += f"第{str(few)}次抽奖出错\n"
else:
message = ''
if "prizeName" in response.json():
message = ",获得" + response.json()['prizeName']
result += f"第{str(few)}次抽奖完成{message}\n"
BI_RM = list("0123456789abcdefghijklmnopqrstuvwxyz")
def int2char(a):
return BI_RM[a]
def b64tohex(a):
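    # Re-encodes a standard base64 string using the hex-style alphabet expected by the
    # login form (apparently mirroring the site's JavaScript b64tohex helper).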
b64map = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
d = ""
e = 0
c = 0
for i in range(len(a)):
if list(a)[i] != "=":
v = b64map.index(list(a)[i])
if 0 == e:
e = 1
d += int2char(v >> 2)
c = 3 & v
elif 1 == e:
e = 2
d += int2char(c << 2 | v >> 4)
c = 15 & v
elif 2 == e:
e = 3
d += int2char(c)
d += int2char(v >> 2)
c = 3 & v
else:
e = 0
d += int2char(c << 2 | v >> 4)
d += int2char(15 & v)
if e == 1:
d += int2char(c << 2)
return d
def rsa_encode(j_rsakey, string):
rsa_key = f"-----BEGIN PUBLIC KEY-----\n{j_rsakey}\n-----END PUBLIC KEY-----"
pubkey = rsa.PublicKey.load_pkcs1_openssl_pem(rsa_key.encode())
result = b64tohex(
(base64.b64encode(rsa.encrypt(f'{string}'.encode(), pubkey))).decode())
return result
def calculate_md5_sign(params):
return hashlib.md5('&'.join(sorted(params.split('&'))).encode('utf-8')).hexdigest()
def login(username, password):
global result
url = "https://cloud.189.cn/api/portal/loginUrl.action?redirectURL=https://cloud.189.cn/web/redirect.html"
r = tianyi_session.get(url)
captchaToken = re.findall(r"captchaToken' value='(.+?)'", r.text)[0]
lt = re.findall(r'lt = "(.+?)"', r.text)[0]
returnUrl = re.findall(r"returnUrl = '(.+?)'", r.text)[0]
paramId = re.findall(r'paramId = "(.+?)"', r.text)[0]
j_rsakey = re.findall(r'j_rsaKey" value="(\S+)"', r.text, re.M)[0]
tianyi_session.headers.update({"lt": lt})
username = rsa_encode(j_rsakey, username)
    password = rsa_encode(j_rsakey, password)
url = "https://open.e.189.cn/api/logbox/oauth2/loginSubmit.do"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/76.0',
'Referer': 'https://open.e.189.cn/',
}
data = {
"appKey": "cloud",
"accountType": '03',
"userName": f"{{RSA}}{username}",
"password": f"{{<PASSWORD>}",
"validateCode": "",
"captchaToken": captchaToken,
"returnUrl": returnUrl,
"mailSuffix": "@189.cn",
"paramId": paramId
}
try:
r = tianyi_session.post(url, data=data, headers=headers, timeout=5)
if r.json()['result'] == 0:
print(r.json()['msg'])
else:
result += "登录出错"
return "error"
redirect_url = r.json()['toUrl']
r = tianyi_session.get(redirect_url)
return tianyi_session
except Exception as e:
result += "登录账号出现异常!"
if __name__ == "__main__":
main()
def main_handler(event, context):
main()
``` |
{
"source": "jing5460/pybingwallpaper",
"score": 2
} |
#### File: pybingwallpaper/pybingwallpaper/webutil.py
```python
import gzip
import ssl
from io import BytesIO
from . import log
from .ntlmauth import HTTPNtlmAuthHandler
from .py23 import get_moved_attr, import_moved
_logger = log.getChild('webutil')
Request = get_moved_attr('urllib2', 'urllib.request', 'Request')
urlopen = get_moved_attr('urllib2', 'urllib.request', 'urlopen')
URLError = get_moved_attr('urllib2', 'urllib.error', 'URLError')
urlparse = get_moved_attr('urlparse', 'urllib.parse', 'urlparse')
parse_qs = get_moved_attr('urlparse', 'urllib.parse', 'parse_qs')
urlencode = get_moved_attr('urllib', 'urllib.parse', 'urlencode')
urljoin = get_moved_attr('urlparse', 'urllib.parse', 'urljoin')
url_request = import_moved('urllib2', 'urllib.request')
def setup_proxy(proxy_protocols, proxy_url, proxy_port, sites, username="", password=""):
proxy_dict = {p: '%s:%s' % (proxy_url, proxy_port) for p in proxy_protocols}
ph = url_request.ProxyHandler(proxy_dict)
passman = url_request.HTTPPasswordMgrWithDefaultRealm()
_logger.info('add proxy site %s', sites)
passman.add_password(None, sites, username, password)
pnah = HTTPNtlmAuthHandler.ProxyNtlmAuthHandler(passman)
pbah = url_request.ProxyBasicAuthHandler(passman)
pdah = url_request.ProxyDigestAuthHandler(passman)
cp = url_request.HTTPCookieProcessor()
context = ssl.create_default_context()
opener = url_request.build_opener(cp,
url_request.HTTPSHandler(debuglevel=1, context=context),
url_request.HTTPHandler(debuglevel=99),
ph, pnah, pbah, pdah,
url_request.HTTPErrorProcessor())
url_request.install_opener(opener)
def _ungzip(html):
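    # A gzip stream starts with the magic bytes 1f 8b 08; if the payload looks
    # gzip-compressed, decompress it in memory before returning.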
if html[:6] == b'\x1f\x8b\x08\x00\x00\x00':
html = gzip.GzipFile(fileobj=BytesIO(html)).read()
return html
def loadurl(url, headers=None, optional=False):
headers = headers or dict()
if not url:
return None
_logger.debug('getting url %s, headers %s', url, headers)
if 'User-Agent' not in headers:
headers[
'User-Agent'
] = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1521.3 Safari/537.36'
try:
req = Request(url=url, headers=headers)
con = urlopen(req)
except Exception as err:
if not optional:
_logger.error('error %s occurs during load %s with header %s', err, url, headers)
_logger.debug('', exc_info=1)
return None
if con:
_logger.debug("Hit %s code: %s", str(con), con.getcode())
data = con.read()
data = _ungzip(data)
_logger.log(log.PAGEDUMP, repr(data))
return data
else:
_logger.error("No data returned.")
return None
def loadpage(url, codec=('utf8', 'strict'), headers=None, optional=False):
headers = headers or dict()
data = loadurl(url, headers=headers, optional=optional)
return data.decode(*codec) if data else None
def postto(url, datadict, headers=None, decodec='gbk'):
headers = headers or dict()
params = urlencode(datadict)
_logger.info('Post %s to %s, headers %s', params, url, headers)
try:
req = Request(url=url, data=params)
for k, v in list(headers.items()):
req.add_header(k, v)
con = urlopen(req)
if con:
_logger.debug("Hit %s %d", str(con), con.getcode())
data = con.read(-1)
return data.decode(decodec)
else:
_logger.error("No data returned.")
return None
except Exception as err:
_logger.error('error %s occurs during post %s to %s', err, params, url)
_logger.debug('', exc_info=1)
def test_header(url, extra_headers=None):
headers = {
'method': 'HEAD',
}
if extra_headers:
headers.update(extra_headers)
resp = loadurl(url, headers, True)
return resp is not None
```
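A minimal usage sketch for the helpers above; the URL is only an example and the import path is assumed from the package layout:
```python
from pybingwallpaper.webutil import loadpage, test_header

url = 'https://www.bing.com'
if test_header(url):
    html = loadpage(url, codec=('utf8', 'ignore'))
    print(len(html) if html else 'no data')
```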
#### File: pybingwallpaper/test/testapppath.py
```python
import unittest
import os
import sys
sys.path.append('../pybingwallpaper')
from main import get_app_path
SOURCE_DIR=''
class TestConfigureParameter(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.oridir = os.path.abspath(os.curdir)
def give_work_dir(self, _dir):
os.chdir(self.oridir)
os.chdir(_dir)
self.curdir = os.path.abspath(os.curdir)
def and_app_src(self, srcfile):
self.srcfile = srcfile
def expect_dir(self, _expect):
_expect = os.path.abspath(_expect)
_expect = os.path.normcase(_expect)
self.assertEqual(get_app_path(self.srcfile), _expect, 'apppath incorrect')
def curdir_still_same(self):
curdir = os.path.abspath(os.curdir)
self.assertEqual(curdir, self.curdir, 'curdir changed')
def test_run_in_src_dir(self):
self.give_work_dir('../pybingwallpaper')
self.and_app_src('main.py')
self.expect_dir(r'E://Work/Python/pybingwallpaper/pybingwallpaper')
self.curdir_still_same()
def test_run_in_cur_dir(self):
self.give_work_dir('.')
self.and_app_src('../pybingwallpaper/main.py')
self.expect_dir(r'E://Work/Python/pybingwallpaper/pybingwallpaper')
self.curdir_still_same()
def test_run_from_root(self):
self.give_work_dir('/')
self.and_app_src(r'work/python/pybingwallpaper/pybingwallpaper/main.py')
self.expect_dir(r'E:\Work\Python\pybingwallpaper\src')
self.curdir_still_same()
def test_run_in_same_disk(self):
self.give_work_dir('e:\\')
self.and_app_src(r'work/python/pybingwallpaper/pybingwallpaper/main.py')
self.expect_dir(r'E:\Work\Python\pybingwallpaper\src')
self.curdir_still_same()
def test_run_in_other_disk(self):
self.give_work_dir('d:')
self.and_app_src(r'e:/work/python/pybingwallpaper/pybingwallpaper/main.py')
self.expect_dir(r'E:\Work\Python\pybingwallpaper\src')
self.curdir_still_same()
def test_run_in_other_disk_dir(self):
self.give_work_dir('c:/windows')
self.and_app_src(r'e:/work/python/pybingwallpaper/pybingwallpaper/main.py')
self.expect_dir(r'E:\Work\Python\pybingwallpaper\src')
self.curdir_still_same()
```
#### File: pybingwallpaper/test/testconfigmodule.py
```python
import unittest
import random
import sys
sys.path.append('../pybingwallpaper')
import config
from config import ConfigParameter
from config import ConfigDatabase
from config import CommandLineArgumentsLoader
from config import DefaultValueLoader
from config import ConfigFileLoader
from config import ConfigFileDumper
from config import Namespace
def getdb():
return ConfigDatabase('test1', description='test desc')
# TODO: Add cases to test loader_srcs option
class TestConfigureParameter(unittest.TestCase):
def setUp(self):
pass
def test_import_config_module(self):
self.assertIsNotNone(ConfigParameter)
self.assertIsNotNone(ConfigDatabase)
def test_init_param(self):
p = ConfigParameter('test1')
self.assertIsNotNone(p)
def test_name(self):
names = ['klb', '1ab', 's#a']
for n in names:
p = ConfigParameter(name = n)
self.assertEqual(p.name, n)
def test_invalid_name(self):
names = ['k b', '\tab', 's\na']
for n in names:
with self.assertRaises(ValueError, msg="parameter name can't contain space"):
ConfigParameter(name = n)
class TestConfigureDatabase(unittest.TestCase):
def setUp(self):
pass
def test_prog(self):
db = getdb()
self.assertEqual(db.prog, 'test1')
def test_desc(self):
db = ConfigDatabase('test1', 'a test database')
self.assertEqual(db.prog, 'test1')
self.assertEqual(db.description, 'a test database')
def test_parameter_init(self):
params = [
ConfigParameter('123'),
ConfigParameter('456')
]
db = ConfigDatabase('test1', parameters=params)
self.assertListEqual(db.parameters, params)
def test_repr(self):
params = [
ConfigParameter('123', type=''),
ConfigParameter('456', type='')
]
db = ConfigDatabase('test1', description='test desc', parameters=params)
dbcopy = eval(repr(db))
self.assertEqual(db.prog, dbcopy.prog)
self.assertEqual(db.description, dbcopy.description)
self.assertListEqual(db.parameters, dbcopy.parameters)
def test_add_parameters(self):
params = [
ConfigParameter('123'),
ConfigParameter('456')
]
new_param = ConfigParameter('789')
db = ConfigDatabase('test1', description='test desc', parameters=params)
self.assertListEqual(db.parameters, params)
db.add_param(new_param)
self.assertListEqual(db.parameters, params+[new_param,])
def test_no_dup_param(self):
params = [
ConfigParameter('123', type=int),
ConfigParameter('456', defaults=9)
]
new_param = ConfigParameter('123')
db = ConfigDatabase('test1', description='test desc', parameters=params)
self.assertListEqual(db.parameters, params)
with self.assertRaises(NameError, msg='duplicated parameter name "%s" found'%(new_param.name,)):
db.add_param(new_param)
self.assertListEqual(db.parameters, params)
class TestCliLoader(unittest.TestCase):
def getdb(self):
return ConfigDatabase('test1', description='test desc')
def getloader(self):
return CommandLineArgumentsLoader()
def test_invalid_arg(self):
loader = self.getloader()
db = getdb()
p = ConfigParameter(name='param1', type=int)
db.add_param(p)
with self.assertRaises(SystemExit) as se:
loader.load(db, ['--not-exist'])
self.assertEqual(se.exception.code, 2)
def test_version(self):
loader = self.getloader()
db = getdb()
p = ConfigParameter(name='notused', loader_opts={'cli':{
'action': 'version',
'flags':('-v','--version'),
'version': 'test-version-1234'
}})
db.add_param(p)
with self.assertRaises(SystemExit) as se:
loader.load(db, ['-v'])
self.assertEqual(se.exception.code, 0)
with self.assertRaises(SystemExit) as se:
loader.load(db, ['--version'])
self.assertEqual(se.exception.code, 0)
def test_name(self):
db = getdb()
cli_opts = {'flags':['-p']}
p = ConfigParameter(name='param1', type=lambda s:int(s,0), loader_opts={'cli':cli_opts})
db.add_param(p)
loader = self.getloader()
with self.assertRaises(SystemExit) as se:
loader.load(db, ['--param1', '1'])
self.assertEqual(se.exception.code, 2)
ans = loader.load(db, ['-p', '1'])
self.assertEqual(getattr(ans, p.name), 1)
def test_load_int(self):
ds = [
('0', 0),
('0x1aedead0b', 0x1aedead0b),
('0b0011', 3),
('-9571293', -9571293),
]
db = getdb()
p = ConfigParameter(name='param1', type=lambda s:int(s,0))
db.add_param(p)
loader = self.getloader()
for s, d in ds:
ans = loader.load(db, ['--param1', s])
self.assertEqual(getattr(ans, p.name), d)
def test_load_str(self):
ds = [
' ',
'#123',
'as_',
'9 9'
]
db = getdb()
p = ConfigParameter(name='param1')
db.add_param(p)
loader = self.getloader()
for s in ds:
ans = loader.load(db, ['--param1', s])
self.assertEqual(getattr(ans, p.name), s)
def test_load_choice(self):
good = ['c1', 'c3', 'c2']
choices = ('c0', 'c1', 'c2', 'c3')
db = getdb()
p = ConfigParameter(name='param1', defaults='c1', choices=choices)
db.add_param(p)
loader = self.getloader()
# try legal ones
for s in good:
ans = loader.load(db, ['--param1', s], generate_default=True)
self.assertEqual(getattr(ans, p.name), s)
# test use default
ans = loader.load(db, [], generate_default=True)
self.assertEqual(getattr(ans, p.name), good[0])
# test illegal value
with self.assertRaises(SystemExit) as se:
loader.load(db, ['--param1', 'no-good'], generate_default=True)
self.assertEqual(se.exception.code, 2)
def test_load_true(self):
cli_opts = {'action':'store_true'}
db = getdb()
p = ConfigParameter(name='param1', defaults=False, loader_opts={'cli':cli_opts})
db.add_param(p)
loader = self.getloader()
ans = loader.load(db, ['--param1'])
self.assertTrue(getattr(ans, p.name))
ans = loader.load(db, [])
self.assertFalse(hasattr(ans, p.name))
ans = loader.load(db, [], generate_default=True)
self.assertFalse(getattr(ans, p.name))
def test_load_false(self):
cli_opts = {'action':'store_false'}
db = getdb()
p = ConfigParameter(name='param1', defaults=True, loader_opts={'cli':cli_opts})
db.add_param(p)
loader = self.getloader()
ans = loader.load(db, ['--param1'])
self.assertFalse(getattr(ans, p.name))
ans = loader.load(db, [], generate_default=True)
self.assertTrue(getattr(ans, p.name))
def test_load_count(self):
cli_opts = {'action':'count'}
db = getdb()
p = ConfigParameter(name='d', defaults=0, loader_opts={'cli':cli_opts})
db.add_param(p)
loader = self.getloader()
ans = loader.load(db, ['-d'], generate_default=True)
self.assertEqual(getattr(ans, p.name), 1)
ans = loader.load(db, [], generate_default=True)
self.assertEqual(getattr(ans, p.name), 0)
ans = loader.load(db, ['-d', '-d', '-d'], generate_default=True)
self.assertEqual(getattr(ans, p.name), 3)
c = random.randint(0, 256)
ans = loader.load(db, ['-'+'d'*c], generate_default=True)
self.assertEqual(getattr(ans, p.name), c)
class TestDefaultValueLoader(unittest.TestCase):
def getloader(self, platform=None):
return DefaultValueLoader(platform)
def test_load_plain_def(self):
loader = self.getloader()
db = getdb()
p = ConfigParameter(name='intparam', defaults=0)
db.add_param(p)
p = ConfigParameter(name='strparam', defaults='blah blah blah')
db.add_param(p)
p = ConfigParameter(name='noneparam')
db.add_param(p)
ans = loader.load(db)
self.assertEqual(ans.intparam, 0)
self.assertEqual(ans.strparam, 'blah blah blah')
self.assertIsNone(ans.noneparam)
def test_load_cur_platform(self):
loader = self.getloader()
db = getdb()
p = ConfigParameter(name='param', defaults={sys.platform:'myval', '*':'otherval'})
db.add_param(p)
ans = loader.load(db)
self.assertEqual(ans.param, 'myval')
def test_load_other_platform(self):
defs = {
'linux': 'linuxval',
'win': 'win32val',
'*': 'otherval'
}
db = getdb()
p = ConfigParameter(name='param', defaults=defs)
db.add_param(p)
loader = self.getloader('linux')
ans = loader.load(db)
self.assertEqual(ans.param, 'linuxval')
loader = self.getloader('darwin')
ans = loader.load(db)
self.assertEqual(ans.param, 'otherval')
loader = self.getloader('win')
ans = loader.load(db)
self.assertEqual(ans.param, 'win32val')
def test_load_with_type(self):
loader = self.getloader()
db = getdb()
p = ConfigParameter(name='param', type=lambda x:int(x,0), defaults='0xffff')
db.add_param(p)
ans = loader.load(db)
self.assertEqual(type(ans.param), int)
self.assertEqual(ans.param, 0xffff)
def test_load_overwrite(self):
loader = self.getloader()
db = getdb()
p = ConfigParameter(name='param', defaults='defval')
db.add_param(p)
ans = loader.load(db)
self.assertEqual(ans.param, 'defval')
ans.param = 'modified'
self.assertEqual(ans.param, 'modified')
from io import StringIO
class TestConfigFileLoader(unittest.TestCase):
def setUp(self):
self.config_file = StringIO('''
[DEFAULT]
# default section values
topParam1 = 1
topParam2 = "s-value"
topParam3 =
[section1]
secParam1 = 1 2 3
secParam2 =
[section3]
secParam2 = somevalue
''')
def getloader(self):
return ConfigFileLoader()
def test_load_plain_value(self):
loader = self.getloader()
db = getdb()
p = ConfigParameter(name='topParam1')
db.add_param(p)
p = ConfigParameter(name='topParam2')
db.add_param(p)
p = ConfigParameter(name='topParam3')
db.add_param(p)
p = ConfigParameter(name='topParamx')
db.add_param(p)
ans = loader.load(db, self.config_file)
self.assertEqual(ans.topParam1, '1')
self.assertEqual(ans.topParam2, '"s-value"')
self.assertEqual(ans.topParam3, '')
self.assertFalse(hasattr(ans, 'topParamx'))
def test_load_type_cast(self):
loader = self.getloader()
db = getdb()
p = ConfigParameter(name='topParam1', type=int)
db.add_param(p)
p = ConfigParameter(name='topParam2', type=None)
db.add_param(p)
p = ConfigParameter(name='topParamx', type=float)
db.add_param(p)
ans = loader.load(db, self.config_file)
self.assertEqual(type(ans.topParam1), int)
self.assertEqual(ans.topParam1, 1)
self.assertEqual(type(ans.topParam2), str)
self.assertEqual(ans.topParam2, '"s-value"')
self.assertFalse(hasattr(ans, 'topParamx'))
def test_config_section(self):
loader = self.getloader()
db = getdb()
getSection = lambda secname: {'section': secname}
p = ConfigParameter(name='topParam2', loader_opts={'conffile':getSection(None)})
db.add_param(p)
p = ConfigParameter(name='secParam1', loader_opts={'conffile':getSection('section1')})
db.add_param(p)
p = ConfigParameter(name='secParam2', loader_opts={'conffile':getSection('section3')})
db.add_param(p)
p = ConfigParameter(name='secParamx', loader_opts={'conffile':getSection('sectionx')})
db.add_param(p)
ans = loader.load(db, self.config_file)
self.assertEqual(ans.topParam2, '"s-value"')
self.assertEqual(ans.secParam1, '1 2 3')
self.assertEqual(ans.secParam2, 'somevalue')
self.assertFalse(hasattr(ans, 'topParamx'))
def test_load_default(self):
loader = self.getloader()
db = getdb()
p = ConfigParameter(name='topParam3', defaults='def-1')
db.add_param(p)
p = ConfigParameter(
name='secParamx',
type=float, defaults='0',
loader_opts={'conffile':{'section':'section3'}}
)
db.add_param(p)
ans = loader.load(db, self.config_file, generate_default=True)
self.assertEqual(ans.topParam3, '')
self.assertEqual(type(ans.secParamx), float)
self.assertEqual(ans.secParamx, float(0))
class TestConfigFileDumper(unittest.TestCase):
def setUp(self):
self.conf = Namespace()
choices = ['cal1', 'cal2', 'cal3']
setattr(self.conf, 'intparam', 0x77992213)
setattr(self.conf, 'strparam', 'a complicat3d string#!')
setattr(self.conf, 'trueparam', True)
setattr(self.conf, 'falseparam', False)
setattr(self.conf, 'choiceparam', choices[1])
self.db = getdb()
p = ConfigParameter(name='intparam', type=int)
self.db.add_param(p)
p = ConfigParameter(name='strparam', type=str)
self.db.add_param(p)
p = ConfigParameter(name='trueparam', type=bool,
loader_opts={'conffile':{'section':'section_1'}})
self.db.add_param(p)
p = ConfigParameter(name='falseparam', type=bool,
loader_opts={'conffile':{
'converter':lambda x: True if bool(x) and x.lower() != 'false' else False
}})
self.db.add_param(p)
p = ConfigParameter(name='choiceparam', choices=choices)
self.db.add_param(p)
def getloader(self):
return ConfigFileLoader()
def getdumper(self):
return ConfigFileDumper()
def test_dump_config(self):
buf = StringIO()
loader = self.getloader()
dumper = self.getdumper()
ret = dumper.dump(self.db, self.conf, buf)
self.assertNotEqual(ret, 0)
buf.seek(0)
ans = loader.load(self.db, buf)
for k, v in vars(self.conf).items():
self.assertTrue(hasattr(ans, k))
self.assertEqual(type(getattr(ans, k)), type(v))
self.assertEqual(getattr(ans, k), v)
self.assertEqual(ans, self.conf)
class TestOtherUtil(unittest.TestCase):
def test_merge(self):
ns1 = Namespace()
ns2 = Namespace()
ns1.param1 = 123
ns2.param1 = 456
ns1.parama = 'a'
ns2.paramb = ('1', 2, 's')
ans = config.merge_config(ns1, ns2)
self.assertEqual(ns1.param1, 123)
self.assertEqual(ns2.param1, 456)
self.assertEqual(ans.param1, 456)
self.assertEqual(ns1.parama, 'a')
self.assertFalse(hasattr(ns2, 'parama'))
self.assertEqual(ans.parama, 'a')
self.assertFalse(hasattr(ns1, 'paramb'))
self.assertEqual(ns2.paramb, ('1', 2, 's'))
self.assertEqual(ans.paramb, ('1', 2, 's'))
``` |
{
"source": "jinga-lala/stupidNMT",
"score": 2
} |
#### File: comet_ml/callbacks/_base.py
```python
import logging
import six
from .._typing import (
Any,
Generator,
Iterator,
List,
Optional,
Set,
TensorflowInput,
Tuple,
)
from ..experiment import BaseExperiment
from ..predictor import Predictor
from ..utils import Histogram, get_time_monotonic, tensor_length
LOGGER = logging.getLogger(__name__)
def get_standardized_layer_set(layer_list_raw, layer_names):
# type: (List[str], List[str]) -> Set[str]
"""
Given a raw list of possible layer names or indices,
return a unique set of valid layer names.
"""
results = set([])
for item in layer_list_raw:
layer_name = None
try:
layer_name = layer_names[int(item)]
except Exception:
if item in layer_names:
layer_name = item
else:
LOGGER.warning("invalid layer %r; ignoring", item)
if layer_name is None:
continue
if layer_name not in results:
results.add(layer_name)
else:
LOGGER.warning("duplicate use of layer %r; ignoring", item)
return results
def get_layer_num(layer_names, layer_name):
# type: (List[str], str) -> int
"""
Get the layer_num of a layer_name (may have things
appended after a slash).
"""
if "/" in layer_name:
layer_name = layer_name.split("/", 1)[0]
if layer_name in layer_names:
return layer_names.index(layer_name)
else:
return -1
def format_histogram_prefix(
prefix_format, num_layers, model_name, layer_names, layer_name
):
# type: (str, int, str, List[str], str) -> str
"""
Allow user to format a histogram prefix.
"""
max_digits = len(str(num_layers))
layer_num = get_layer_num(layer_names, layer_name) + 1
try:
prefix = prefix_format.format(
model_name=model_name,
layer_num=layer_num,
layer_name=layer_name,
max_digits=max_digits,
)
except Exception:
LOGGER.warning("invalid prefix_format %r; ignoring", prefix_format)
prefix = ""
return prefix
def enumerate_tensors(banks_length, tensors, batch_size):
# type: (int, Any, int) -> Generator[Any, None, None]
"""
Break up inputs and targets into batch sizes.
This can be complicated because the format of inputs
and targets can vary based on the number of banks
in the input layers, and number of banks in the
output layers.
tensors can be:
* a tuple of lists
* a tuple of dicts
"""
if tensor_length(tensors) == 0:
return
if banks_length > 1: # multiple banks
length = tensor_length(tensors[0])
multi = True
else:
length = tensor_length(tensors)
multi = False
current = 0
while current < length:
if multi:
batch = [bank[current : current + batch_size] for bank in tensors]
else:
batch = tensors[current : current + batch_size]
yield batch
current += batch_size
def enumerate_tensor_list(banks_length, tensors, indices):
# type: (int, Any, List[int]) -> Generator[Tuple[int, Any], None, None]
"""
Break up inputs and targets by index.
This can be complicated because the format of inputs
and targets can vary based on the number of banks
in the input layers, and number of banks in the
output layers.
tensors can be:
* a tuple of lists
* a tuple of dicts
"""
if tensor_length(tensors) == 0:
return
if banks_length > 1: # multiple banks
length = tensor_length(tensors[0])
multi = True
else:
length = tensor_length(tensors)
multi = False
for i, index in enumerate(indices):
if index < length:
if multi:
batch = [bank[index : index + 1] for bank in tensors]
else:
batch = tensors[index : index + 1]
else:
batch = None
yield (i, batch)
def get_trainable_variables(model):
# type: (Any) -> List[Any]
if hasattr(model, "trainable_variables"):
return model.trainable_variables
elif hasattr(model, "trainable_weights"):
return model.trainable_weights
else:
return []
def get_tensorflow_gradient_histograms(
model, inputs, targets, batch_size, index_list, layer_set
):
# type: (Any, Any, Any, int, List[int], Set[str]) -> Optional[Iterator[Tuple[str, TensorflowInput, Histogram]]]
# Logging gradients does not work with tensorflow 1.*
try:
from ..tf_utils import get_gradients
except ImportError:
return None
histograms = None
if layer_set != set([]):
weights = [
weight
for weight in get_trainable_variables(model)
if weight.name.split("/", 1)[0] in layer_set
]
else:
weights = get_trainable_variables(model)
if len(weights) == 0:
return None
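    # Two modes: with an explicit index_list, build one histogram per weight per selected
    # input example; otherwise stream the data in batches and accumulate a single
    # histogram per weight across all examples.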
if index_list != []: # single patterns
input_gen = (
v for (i, v) in enumerate_tensor_list(len(model.inputs), inputs, index_list)
)
target_gen = (
v
for (i, v) in enumerate_tensor_list(len(model.outputs), targets, index_list)
)
all_weight_names = [weight.name for weight in weights]
# For each index:
index = 0
for ins, targs in six.moves.zip(input_gen, target_gen):
gradients = get_gradients(model, ins, targs, weights)
if histograms is None:
histograms = []
names = []
indices = [] # type: List[TensorflowInput]
for i in range(len(gradients)):
histogram = Histogram()
histogram.add(gradients[i])
histograms.append(histogram)
names.append(all_weight_names[i])
indices.append(index_list[index])
index += 1
else:
input_gen = enumerate_tensors(len(model.inputs), inputs, batch_size)
target_gen = enumerate_tensors(len(model.outputs), targets, batch_size)
names = [weight.name for weight in weights]
indices = ["all" for weight in weights] # type: List[TensorflowInput]
for batch_input, batch_target in six.moves.zip(input_gen, target_gen):
gradients = get_gradients(model, batch_input, batch_target, weights)
if histograms is None:
histograms = [Histogram() for i in range(len(gradients))]
for i in range(len(gradients)):
histograms[i].add(gradients[i])
# check to see if all are the same length
return six.moves.zip(names, indices, histograms)
def get_tensorflow_activation_histogram_indices(
model, output_tensor, inputs, index_list
):
# type: (Any, Any, Any, List[int]) -> Optional[List[Histogram]]
# Logging activations does not work with tensorflow 1.11
try:
import tensorflow as tf
except ImportError:
return None
histograms = [Histogram() for i in range(len(index_list))]
try:
function = tf.keras.backend.function(model.inputs, output_tensor)
for i, batch_input in enumerate_tensor_list(
len(model.inputs), inputs, index_list
):
# Batch input is either a one-item input (tensor/ndarray) or a list of one-item
# input (tensor/ndarray)
if batch_input is None:
LOGGER.warning(
"index_list[%s] is %r and beyond length of inputs/targets",
i,
index_list[i],
)
else:
activations = function(batch_input)
histograms[i].add(activations)
return histograms
except tf.errors.InvalidArgumentError:
LOGGER.debug("Error retrieving activation histograms", exc_info=True)
return None
def get_tensorflow_activation_histogram_all(model, output_tensor, inputs, batch_size):
# type: (Any, Any, Any, int) -> Optional[List[Histogram]]
# Logging activations does not work with tensorflow 1.11
try:
import tensorflow as tf
except ImportError:
return None
histogram = Histogram()
try:
function = tf.keras.backend.function(model.inputs, output_tensor)
for batch_input in enumerate_tensors(len(model.inputs), inputs, batch_size):
# Batch input is either a one-item input (tensor/ndarray) or a list of one-item
# input (tensor/ndarray)
activations = function(batch_input)
histogram.add(activations)
return [histogram]
except tf.errors.InvalidArgumentError:
LOGGER.debug("Error retrieving activation histograms", exc_info=True)
return None
def build_base_callback(base):
class BaseCallback(base): # type: ignore
"""
Base Keras callback.
"""
def __init__(self):
super(BaseCallback, self).__init__()
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
pass
def on_batch_end(self, batch, logs=None):
pass
def on_train_begin(self, logs=None):
pass
def on_train_end(self, logs=None):
pass
def on_train_batch_begin(self, batch, logs=None):
pass
def on_train_batch_end(self, batch, logs=None):
pass
def on_test_batch_begin(self, batch, logs=None):
pass
def on_test_batch_end(self, batch, logs=None):
pass
def on_test_begin(self, logs=None):
pass
def on_test_end(self, logs=None):
pass
def on_predict_begin(self, logs=None):
pass
def on_predict_end(self, logs=None):
pass
def on_predict_batch_begin(self, batch, logs=None):
pass
def on_predict_batch_end(self, batch, logs=None):
pass
return BaseCallback
def build_predictive_early_stopping_keras_callback(base):
class PredictiveEarlyStoppingKerasCallback(base): # type: ignore
def __init__(self, predictor, predict_epoch_rate=1):
# type: (Predictor, int) -> None
super(PredictiveEarlyStoppingKerasCallback, self).__init__()
self.predictor = predictor
self.predict_epoch_rate = predict_epoch_rate
def on_epoch_end(self, epoch, logs={}):
if epoch % self.predict_epoch_rate == 0:
loss = logs.get(self.predictor.loss_name)
self.predictor.report_loss(loss)
if self.predictor.stop_early(epoch=epoch):
self.model.stop_training = True
return PredictiveEarlyStoppingKerasCallback
def build_empty_keras_callback(base):
class EmptyKerasCallback(base): # type: ignore
"""
Empty Keras callback.
"""
return EmptyKerasCallback
def build_keras_callback(base):
class KerasCallback(base): # type: ignore
""" Keras callback to report params, metrics to Comet.ml Experiment()"""
def __init__(
self,
experiment, # type: BaseExperiment
log_params=None, # type: Optional[bool]
log_metrics=None, # type: Optional[bool]
log_graph=None, # type: Optional[bool]
log_histograms=None, # type: Optional[bool]
inputs=None, # type: Optional[Any]
targets=None, # type: Optional[Any]
):
# type: (...) -> None
"""
Create a new experiment and submit source code.
:param api_key: User's API key. Required.
"""
super(KerasCallback, self).__init__()
self.inputs = inputs
self.targets = targets
# If any log_* parameters are given, give warning and ignore:
if log_params is not None:
experiment._log_once_at_level(
logging.INFO,
"Passing log_params to KerasCallback is deprecated; use experiment.auto_param_logging",
)
if log_metrics is not None:
experiment._log_once_at_level(
logging.INFO,
"Passing log_metrics to KerasCallback is deprecated; use experiment.auto_metric_logging",
)
if log_graph is not None:
experiment._log_once_at_level(
logging.INFO,
"Passing log_graph to KerasCallback is deprecated; use experiment.log_graph",
)
if log_histograms is not None:
experiment._log_once_at_level(
logging.INFO,
"Passing log_histograms to KerasCallback is deprecated; use experiment.auto_histogram_*_logging",
)
# Inits the experiment with reference to the name of this class. Required for loading the correct
# script file
self.experiment = experiment
self.epoch_start_time = None # type: Optional[float]
self.our_step = 0
self.our_epoch = 0
self.activation_ignore_list = ["flatten", "dropout", "activation"]
def on_epoch_begin(self, epoch, logs=None):
try:
# This function should only be called during train mode.
LOGGER.debug("On epoch begin %s %s", epoch, logs)
self.experiment.set_epoch(self.our_epoch)
self.epoch_start_time = get_time_monotonic()
if self.our_epoch == 0:
self._log_histograms()
except Exception:
LOGGER.warning(
"An unknown exception happened in Keras callback on_epoch_begin; ignoring",
exc_info=True,
)
def on_epoch_end(self, epoch, logs=None):
try:
# This function should only be called during train mode.
LOGGER.debug("On epoch end %s %s", epoch, logs)
if self.experiment.auto_metric_logging:
if self.epoch_start_time is not None:
self.experiment._log_metric(
"epoch_duration",
get_time_monotonic() - self.epoch_start_time,
step=self.our_step,
epoch=self.our_epoch,
framework="keras",
)
self.epoch_start_time = None
self.experiment.log_epoch_end(self.our_epoch, step=self.our_step)
if logs:
for name, val in logs.items():
self.experiment._log_metric(
name,
val,
step=self.our_step,
epoch=self.our_epoch,
framework="keras",
)
except Exception:
LOGGER.warning(
"An unknown exception happened in Keras callback on_epoch_end; ignoring",
exc_info=True,
)
self.our_epoch += 1
try:
if self.experiment._check_histogram_epoch_report_rate(self.our_epoch):
self._log_histograms()
except Exception:
LOGGER.warning(
"An unknown exception happened in Keras callback on_epoch_end; ignoring",
exc_info=True,
)
def _log_histograms(self):
prefix_format = self.experiment.config.get_string(
None, "comet.keras.histogram_name_prefix"
)
batch_size = self.experiment.config.get_int(
None, "comet.keras.histogram_batch_size"
)
# Pre-compute some common variables
num_layers = len(self.model.layers)
model_name = self.model.name
layer_names = [layer.name for layer in self.model.layers]
self._log_weights_histograms(
prefix_format, num_layers, model_name, layer_names, batch_size
)
self._log_gradients_histograms(
prefix_format, num_layers, model_name, layer_names, batch_size
)
self._log_activations_histograms(
prefix_format, num_layers, model_name, layer_names, batch_size
)
def _log_weights_histograms(
self, prefix_format, num_layers, model_name, layer_names, batch_size
):
# type: (str, int, str, List[str], int) -> None
if self.experiment.auto_histogram_weight_logging is False:
return None
try:
for layer in self.model.layers:
weights = layer.get_weights()
if len(weights) == len(layer.weights):
for i in range(len(layer.weights)):
prefix = format_histogram_prefix(
prefix_format,
num_layers,
model_name,
layer_names,
layer.weights[i].name,
)
self.experiment.log_histogram_3d(
weights[i],
name=layer.weights[i].name,
step=self.our_step,
epoch=self.our_epoch,
metadata={"prefix": prefix},
)
else:
LOGGER.warning(
"keras layer.weights and layer.get_weights() are different lengths; ignoring weight histogram"
)
except Exception:
LOGGER.debug("error attempting to log weights; ignoring", exc_info=True)
def _log_gradients_histograms(
self, prefix_format, num_layers, model_name, layer_names, batch_size
):
# type: (str, int, str, List[str], int) -> None
if self.experiment.auto_histogram_gradient_logging is False:
return None
else:
if self.inputs is None or self.targets is None:
self.experiment._log_once_at_level(
logging.WARNING,
"auto_histogram_gradient_logging is True, but inputs and targets are not available; unable to log gradients",
)
return None
try:
gradient_index_list = self.experiment.config.get_int_list(
None, "comet.keras.histogram_gradient_index_list"
)
except Exception:
LOGGER.warning(
"malformed `comet.keras.histogram_gradient_index_list`; should be a string of comma-separated integers; ignoring",
exc_info=True,
)
# If we don't have index, early-return as we won't generate any histogram
return None
try:
gradient_layer_list_raw = self.experiment.config.get_string_list(
None, "comet.keras.histogram_gradient_layer_list"
)
except Exception:
LOGGER.warning(
"malformed `comet.keras.histogram_gradient_layer_list`; should be a string of comma-separated integers and/or names; ignoring",
exc_info=True,
)
# If we don't have names, early-return as we won't generate any histogram
return None
gradient_layer_set = get_standardized_layer_set(
gradient_layer_list_raw, layer_names
)
try:
histograms = get_tensorflow_gradient_histograms(
self.model,
self.inputs,
self.targets,
batch_size,
gradient_index_list,
gradient_layer_set,
)
if histograms is not None:
for layer_name, index, histogram in histograms:
prefix = format_histogram_prefix(
prefix_format,
num_layers,
model_name,
layer_names,
layer_name,
)
self.experiment.log_histogram_3d(
histogram,
name="/".join([layer_name, ("gradients:%s" % index)]),
step=self.our_step,
epoch=self.our_epoch,
metadata={"prefix": prefix},
)
except Exception:
LOGGER.debug(
"error attempting to log gradients; ignoring", exc_info=True
)
def _log_activations_histograms(
self, prefix_format, num_layers, model_name, layer_names, batch_size
):
# type: (str, int, str, List[str], int) -> None
if self.experiment.auto_histogram_activation_logging is False:
return None
else:
if self.inputs is None:
self.experiment._log_once_at_level(
logging.WARNING,
"auto_histogram_activation_logging is True, but inputs are not available; unable to log activations",
)
return None
try:
activation_index_list = self.experiment.config.get_int_list(
None, "comet.keras.histogram_activation_index_list"
)
except Exception:
LOGGER.warning(
"malformed `comet.keras.histogram_activation_index_list`; should be a string of comma-separated integers; ignoring",
exc_info=True,
)
# If we don't have index, early-return as we won't generate any histogram
return None
try:
activation_layer_list_raw = self.experiment.config.get_string_list(
None, "comet.keras.histogram_activation_layer_list"
)
except Exception:
LOGGER.warning(
"malformed `comet.keras.histogram_activation_layer_list`; should be a string of comma-separated integers and/or names; ignoring",
exc_info=True,
)
# If we don't have names, early-return as we won't generate any histogram
return None
activation_layer_set = get_standardized_layer_set(
activation_layer_list_raw, layer_names
)
try:
for layer in self.model.layers:
if activation_layer_set == set([]):
if any(
(ignore in layer.name)
for ignore in self.activation_ignore_list
):
continue
elif layer.name not in activation_layer_set:
continue
LOGGER.debug("histogram activation processing %s...", layer.name)
if activation_index_list == []: # all
histograms = get_tensorflow_activation_histogram_all(
self.model, layer.output, self.inputs, batch_size
)
else:
histograms = get_tensorflow_activation_histogram_indices(
self.model,
layer.output,
self.inputs,
activation_index_list,
)
if histograms is not None:
for i, histogram in enumerate(histograms):
if activation_index_list == []:
name = "all" # type: TensorflowInput
else:
name = activation_index_list[i] # type: TensorflowInput
prefix = format_histogram_prefix(
prefix_format,
num_layers,
model_name,
layer_names,
layer.name,
)
self.experiment.log_histogram_3d(
histogram,
name="/".join(
[layer.name, ("activations:%s" % name)]
), # index of input tensor
step=self.our_step,
epoch=self.our_epoch,
metadata={"prefix": prefix},
)
except Exception:
LOGGER.debug(
"error attempting to log activations; ignoring", exc_info=True
)
return None
def on_batch_begin(self, batch, logs=None):
try:
            # This function is called directly when in train mode.
LOGGER.debug("On batch begin %s %s", batch, logs)
except Exception:
LOGGER.warning(
"An unknown exception happened in Keras callback on_batch_begin; ignoring",
exc_info=True,
)
def on_batch_end(self, batch, logs=None):
"""
Logs training metrics.
"""
try:
            # This function is called directly when in train mode.
LOGGER.debug("On batch end %s %s", batch, logs)
self.our_step += 1
# Use the batch from keras, as it starts over each epoch:
if self.experiment._check_metric_step_report_rate(batch):
self._send_batch_messages(logs)
except Exception:
LOGGER.warning(
"An unknown exception happened in Keras callback on_batch_end; ignoring",
exc_info=True,
)
def on_train_batch_end(self, batch, logs=None):
try:
# No context added here, to match previous behavior:
self.on_batch_end(batch, logs)
except Exception:
LOGGER.warning(
"An unknown exception happened in Keras callback on_train_batch_end; ignoring",
exc_info=True,
)
def on_test_batch_end(self, batch, logs=None):
try:
with self.experiment.validate():
self.on_batch_end(batch, logs)
except Exception:
LOGGER.warning(
"An unknown exception happened in Keras callback on_test_batch_end; ignoring",
exc_info=True,
)
def _send_batch_messages(self, logs):
if logs and self.experiment.auto_metric_logging:
for name, val in logs.items():
self.experiment._log_metric(
"batch_" + name, val, step=self.our_step, framework="keras"
)
def on_train_begin(self, logs=None):
"""
Sets model graph.
"""
try:
LOGGER.debug("On train begin %s", logs)
if self.experiment.log_graph:
model_graph = get_keras_model(self.experiment, self.model)
if model_graph:
self.experiment._set_model_graph(model_graph, framework="keras")
else:
LOGGER.debug("Empty graph model, skipping")
try:
trainable_params = self.model.count_params()
self.experiment._log_other(
"trainable_params", trainable_params, framework="keras"
)
except Exception:
LOGGER.debug("Failed to count params in model", exc_info=True)
if self.experiment.auto_param_logging:
if logs:
for k, v in logs.items():
self.experiment._log_parameter(k, v, framework="keras")
# Keras Callback doesn't set this parameter at creation by default
if hasattr(self, "params") and self.params:
for k, v in self.params.items():
if k != "metrics":
self.experiment._log_parameter(k, v, framework="keras")
try:
optimizer_name = self.model.optimizer.__class__.__name__
config = self.model.optimizer.get_config()
for key, value in config.items():
self.experiment._log_parameter(
optimizer_name + "_" + key, value, framework="keras"
)
except Exception:
LOGGER.debug(
"Failed to extract optimizer information", exc_info=True
)
except Exception:
LOGGER.warning(
"An unknown exception happened in Keras callback on_train_begin; ignoring",
exc_info=True,
)
def on_train_end(self, *args, **kwargs):
try:
LOGGER.debug("On train end %r", locals())
except Exception:
LOGGER.warning(
"An unknown exception happened in Keras callback on_train_end; ignoring",
exc_info=True,
)
return KerasCallback
def get_keras_model(experiment, model):
# type: (BaseExperiment, Any) -> Any
# With multi-gpu models we save the original model in the experiment
# storage
storage_key = "gpu_model_%s" % id(model)
json_model = experiment._storage["keras"]["json_model"].get(storage_key, None)
if json_model is not None:
return json_model
else:
return model
```
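
The callback above funnels weights, gradients, and activations through `experiment.log_histogram_3d(...)` with a `name`, `step`, `epoch`, and a prefix in `metadata`. Below is a minimal sketch of the same call made directly, outside the callback; the array, layer name, and project name are illustrative assumptions, and the API key is assumed to come from configuration:

```python
import numpy as np
from comet_ml import Experiment

experiment = Experiment(project_name="histogram-demo")  # API key via config/env

# A fake "kernel" standing in for what the callback gets from layer.get_weights().
weights = np.random.randn(128, 64)

# Same call the KerasCallback above uses for weights, gradients, and activations.
experiment.log_histogram_3d(
    weights,
    name="dense_1/kernel:0",
    step=0,
    epoch=0,
    metadata={"prefix": "001/dense_1"},
)
```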
#### File: site-packages/comet_ml/__init__.py
```python
from __future__ import print_function
import logging
import traceback
from ._logging import (
ADD_SYMLINK_ERROR,
ADD_TAGS_ERROR,
EXPERIMENT_LIVE,
INTERNET_CONNECTION_ERROR,
INVALID_API_KEY,
REGISTER_RPC_FAILED,
SEND_NOTIFICATION_FAILED,
setup,
setup_http_handler,
)
from ._reporting import EXPERIMENT_CREATED, EXPERIMENT_CREATION_FAILED
from ._typing import Any, Dict, Optional
from .api import API, APIExperiment
from .comet import Streamer, format_url, generate_guid, is_valid_experiment_key
from .config import ( # noqa
get_api_key,
get_config,
get_global_experiment,
get_previous_experiment,
get_ws_url,
)
from .confusion_matrix import ConfusionMatrix
from .connection import (
INITIAL_BEAT_DURATION,
RestServerConnection,
WebSocketConnection,
get_backend_address,
get_comet_api_client,
get_rest_api_client,
log_url,
)
from .exceptions import BadCallbackArguments, InvalidAPIKey
from .experiment import BaseExperiment
from .feature_toggles import HTTP_LOGGING, FeatureToggles
from .json_encoder import NestedEncoder
from .loggers.fastai_logger import patch as fastai_patch
from .loggers.keras_logger import patch as keras_patch
from .loggers.lightgbm_logger import patch as lgbm_patch
from .loggers.mlflow_logger import patch as mlflow_patch
from .loggers.pytorch_logger import patch as pytorch_patch
from .loggers.shap_logger import patch as shap_patch
from .loggers.sklearn_logger import patch as sklearn_patch
from .loggers.tensorboard_logger import patch as tb_patch
from .loggers.tensorflow_logger import patch as tf_patch
from .loggers.tfma_logger import patch as tfma_patch
from .loggers.xgboost_logger import patch as xg_patch
from .logging_messages import ONLINE_EXPERIMENT_THROTTLED
from .monkey_patching import CometModuleFinder
from .offline import OfflineExperiment
from .optimizer import Optimizer
from .rpc import create_remote_call, get_remote_action_definition
from .utils import Embedding, Histogram, get_comet_version, merge_url, valid_ui_tabs
__author__ = "Gideon<<EMAIL>>"
__all__ = [
"API",
"APIExperiment",
"ConfusionMatrix",
"Embedding",
"ExistingExperiment",
"Experiment",
"get_comet_api_client",
"Histogram",
"OfflineExperiment",
"Optimizer",
]
__version__ = get_comet_version()
LOGGER = logging.getLogger(__name__)
if not get_config("comet.disable_auto_logging"):
# Activate the monkey patching
MODULE_FINDER = CometModuleFinder()
keras_patch(MODULE_FINDER)
sklearn_patch(MODULE_FINDER)
tf_patch(MODULE_FINDER)
tb_patch(MODULE_FINDER)
pytorch_patch(MODULE_FINDER)
fastai_patch(MODULE_FINDER)
mlflow_patch(MODULE_FINDER)
xg_patch(MODULE_FINDER)
tfma_patch(MODULE_FINDER)
shap_patch(MODULE_FINDER)
lgbm_patch(MODULE_FINDER)
MODULE_FINDER.start()
# Configure the logging
setup(get_config())
def start():
"""
If you are not using an Experiment in your first loaded Python file, you
must import `comet_ml` and call `comet_ml.start` before any other imports
to ensure that comet.ml is initialized correctly.
"""
class Experiment(BaseExperiment):
"""
Experiment is a unit of measurable research that defines a single run with some data/parameters/code/results.
Creating an Experiment object in your code will report a new experiment to your Comet.ml project. Your Experiment
will automatically track and collect many things and will also allow you to manually report anything.
You can create multiple objects in one script (such as when looping over multiple hyper parameters).
"""
def __init__(
self,
api_key=None, # type: Optional[str]
project_name=None, # type: Optional[str]
workspace=None, # type: Optional[str]
log_code=True, # type: Optional[bool]
log_graph=True, # type: Optional[bool]
auto_param_logging=True, # type: Optional[bool]
auto_metric_logging=True, # type: Optional[bool]
parse_args=True, # type: Optional[bool]
auto_output_logging="default", # type: Optional[str]
log_env_details=True, # type: Optional[bool]
log_git_metadata=True, # type: Optional[bool]
log_git_patch=True, # type: Optional[bool]
disabled=False, # type: Optional[bool]
log_env_gpu=True, # type: Optional[bool]
log_env_host=True, # type: Optional[bool]
display_summary=None, # type: Optional[bool]
log_env_cpu=True, # type: Optional[bool]
display_summary_level=1, # type: Optional[int]
optimizer_data=None, # type: Optional[Dict[str, Any]]
auto_weight_logging=None, # type: Optional[bool]
auto_log_co2=True, # type: Optional[bool]
auto_metric_step_rate=10, # type: Optional[int]
auto_histogram_tensorboard_logging=False, # type: Optional[bool]
auto_histogram_epoch_rate=1, # type: Optional[int]
auto_histogram_weight_logging=False, # type: Optional[bool]
auto_histogram_gradient_logging=False, # type: Optional[bool]
auto_histogram_activation_logging=False, # type: Optional[bool]
):
"""
Creates a new experiment on the Comet.ml frontend.
Args:
api_key: Your API key obtained from comet.ml
            project_name: Optional. Send your experiment to a specific project. Otherwise it will be sent to `Uncategorized Experiments`.
                If the project name does not already exist, Comet.ml will create a new project.
workspace: Optional. Attach an experiment to a project that belongs to this workspace
log_code: Default(True) - allows you to enable/disable code logging
log_graph: Default(True) - allows you to enable/disable automatic computation graph logging.
auto_param_logging: Default(True) - allows you to enable/disable hyper parameters logging
auto_metric_logging: Default(True) - allows you to enable/disable metrics logging
auto_metric_step_rate: Default(10) - controls how often batch metrics are logged
auto_histogram_tensorboard_logging: Default(False) - allows you to enable/disable automatic tensorboard histogram logging
auto_histogram_epoch_rate: Default(1) - controls how often histograms are logged
auto_histogram_weight_logging: Default(False) - allows you to enable/disable histogram logging for biases and weights
auto_histogram_gradient_logging: Default(False) - allows you to enable/disable automatic histogram logging of gradients
auto_histogram_activation_logging: Default(False) - allows you to enable/disable automatic histogram logging of activations
auto_output_logging: Default("default") - allows you to select
which output logging mode to use. You can pass `"native"`
which will log all output even when it originated from a C
native library. You can also pass `"simple"` which will work
only for output made by Python code. If you want to disable
automatic output logging, you can pass `False`. The default is
`"default"` which will detect your environment and deactivate
the output logging for IPython and Jupyter environment and
sets `"native"` in the other cases.
auto_log_co2: Default(True) - automatically tracks the CO2 emission of
this experiment if `codecarbon` package is installed in the environment
parse_args: Default(True) - allows you to enable/disable automatic parsing of CLI arguments
log_env_details: Default(True) - log various environment
information in order to identify where the script is running
log_env_gpu: Default(True) - allow you to enable/disable the
automatic collection of gpu details and metrics (utilization, memory usage etc..).
`log_env_details` must also be true.
log_env_cpu: Default(True) - allow you to enable/disable the
automatic collection of cpu details and metrics (utilization, memory usage etc..).
`log_env_details` must also be true.
log_env_host: Default(True) - allow you to enable/disable the
automatic collection of host information (ip, hostname, python version, user etc...).
`log_env_details` must also be true.
log_git_metadata: Default(True) - allow you to enable/disable the
automatic collection of git details
log_git_patch: Default(True) - allow you to enable/disable the
automatic collection of git patch
display_summary_level: Default(1) - control the summary detail that is
displayed on the console at end of experiment. If 0, the summary
notification is still sent. Valid values are 0 to 2.
disabled: Default(False) - allows you to disable all network
communication with the Comet.ml backend. It is useful when you
want to test to make sure everything is working, without actually
logging anything.
"""
self.config = get_config()
self.api_key = get_api_key(api_key, self.config)
if self.api_key is None:
raise ValueError(
"Comet.ml requires an API key. Please provide as the "
"first argument to Experiment(api_key) or as an environment"
" variable named COMET_API_KEY "
)
super(Experiment, self).__init__(
project_name=project_name,
workspace=workspace,
log_code=log_code,
log_graph=log_graph,
auto_param_logging=auto_param_logging,
auto_metric_logging=auto_metric_logging,
parse_args=parse_args,
auto_output_logging=auto_output_logging,
log_env_details=log_env_details,
log_git_metadata=log_git_metadata,
log_git_patch=log_git_patch,
disabled=disabled,
log_env_gpu=log_env_gpu,
log_env_host=log_env_host,
display_summary=display_summary, # deprecated
display_summary_level=display_summary_level,
log_env_cpu=log_env_cpu,
optimizer_data=optimizer_data,
auto_weight_logging=auto_weight_logging, # deprecated
auto_log_co2=auto_log_co2,
auto_metric_step_rate=auto_metric_step_rate,
auto_histogram_tensorboard_logging=auto_histogram_tensorboard_logging,
auto_histogram_epoch_rate=auto_histogram_epoch_rate,
auto_histogram_weight_logging=auto_histogram_weight_logging,
auto_histogram_gradient_logging=auto_histogram_gradient_logging,
auto_histogram_activation_logging=auto_histogram_activation_logging,
)
self.ws_connection = None
self.connection = None # type: Optional[RestServerConnection]
self.rest_api_client = None
if self.disabled is not True:
self._start()
if self.alive is True:
self._report(event_name=EXPERIMENT_CREATED)
LOGGER.info(EXPERIMENT_LIVE, self._get_experiment_url())
def _setup_http_handler(self):
if not self.feature_toggles[HTTP_LOGGING]:
LOGGER.debug("Do not setup http logger, disabled by feature toggle")
return
self.http_handler = setup_http_handler(
log_url(get_backend_address()), self.api_key, self.id
)
def _setup_streamer(self):
"""
Do the necessary work to create mandatory objects, like the streamer
and feature flags
"""
server_address = get_backend_address()
self.connection = RestServerConnection(
self.api_key, self.id, server_address, self.config["comet.timeout.http"]
)
self.rest_api_client = get_rest_api_client("v2", api_key=self.api_key)
try:
results = self._authenticate()
if results:
ws_server_from_backend, initial_offset = results
else:
ws_server_from_backend, initial_offset = None, None
except ValueError:
tb = traceback.format_exc()
LOGGER.error(INTERNET_CONNECTION_ERROR, exc_info=True)
self._report(event_name=EXPERIMENT_CREATION_FAILED, err_msg=tb)
return False
# Authentication failed somehow
if ws_server_from_backend is None:
return False
ws_server = get_ws_url(ws_server_from_backend, self.config)
# Generate the full ws url based on ws server
full_ws_url = format_url(ws_server, apiKey=self.api_key, runId=self.run_id)
# Setup the HTTP handler
self._setup_http_handler()
# Initiate the streamer
self._initialize_streamer(full_ws_url, initial_offset)
return True
def _authenticate(self):
"""
Do the handshake with the Backend to authenticate the api key and get
various parameters and settings
"""
# Get an id for this run
run_id_results = self.connection.get_run_id(self.project_name, self.workspace)
(
self.run_id,
ws_server,
self.project_id,
self.is_github,
self.focus_link,
self.upload_limit,
self.asset_upload_limit,
feature_toggles,
initial_offset,
self.upload_web_asset_url_prefix,
self.upload_web_image_url_prefix,
self.upload_api_asset_url_prefix,
self.upload_api_image_url_prefix,
) = run_id_results
self.feature_toggles = FeatureToggles(feature_toggles, self.config)
return (ws_server, initial_offset)
def _initialize_streamer(self, full_ws_url, initial_offset):
"""
Initialize the streamer with the websocket url received during the
handshake.
"""
# Initiate the streamer
self.ws_connection = WebSocketConnection(full_ws_url, self.connection)
self.ws_connection.start()
self.ws_connection.wait_for_connection()
self.streamer = Streamer(
self.ws_connection,
INITIAL_BEAT_DURATION,
self.connection,
initial_offset,
self.id,
self.api_key,
self.run_id,
self.project_id,
self.rest_api_client,
self._on_pending_rpcs_callback,
self.config["comet.timeout.cleaning"],
self.config["comet.timeout.upload"],
)
# Start streamer thread.
self.streamer.start()
def _mark_as_started(self):
try:
self.connection.update_experiment_status(
self.run_id, self.project_id, self.alive
)
except Exception:
LOGGER.error("Failed to report experiment status", exc_info=True)
def _mark_as_ended(self):
if self.alive:
try:
self.connection.update_experiment_status(
self.run_id, self.project_id, False
)
except Exception:
LOGGER.error("Failed to report experiment status", exc_info=True)
def _report(self, *args, **kwargs):
self.connection.report(*args, **kwargs)
def _on_end(self, wait=True):
""" Called when the Experiment is replaced by another one or at the
end of the script
"""
successful_clean = super(Experiment, self)._on_end(wait=wait)
if not successful_clean:
LOGGER.warning("Failed to log run in comet.ml")
else:
if self.alive:
LOGGER.info(EXPERIMENT_LIVE, self._get_experiment_url())
# If we didn't drain the streamer, don't close the websocket connection
if self.ws_connection is not None and wait is True:
self.ws_connection.close()
LOGGER.debug("Waiting for WS connection to close")
if wait is True:
ws_cleaned = self.ws_connection.wait_for_finish()
if ws_cleaned is True:
LOGGER.debug("Websocket connection clean successfully")
else:
LOGGER.debug("Websocket connection DIDN'T clean successfully")
successful_clean = False
self.ws_connection.force_close()
if self.connection is not None:
self.connection.close()
# Display throttling message
try:
if self._check_experiment_throttled():
LOGGER.warning(ONLINE_EXPERIMENT_THROTTLED)
except Exception:
LOGGER.debug("Failed to check experiment metadata", exc_info=True)
return successful_clean
def _check_experiment_throttled(self):
experiment_metadata = self.rest_api_client.get_experiment_metadata(self.id)
return experiment_metadata.get("throttle", False)
@property
def url(self):
"""
Get the url of the experiment.
Example:
```python
>>> api_experiment.url
"https://www.comet.ml/username/34637643746374637463476"
```
"""
return self._get_experiment_url()
def _get_experiment_url(self, tab=None):
if self.focus_link:
if tab:
if tab in valid_ui_tabs():
return merge_url(
self.focus_link + self.id,
{"experiment-tab": valid_ui_tabs(tab)},
)
else:
LOGGER.info("tab must be one of: %r", valid_ui_tabs(preferred=True))
return self.focus_link + self.id
return ""
def _on_pending_rpcs_callback(self):
""" Called by streamer when we have pending rpcs
"""
LOGGER.debug("Checking pending rpcs")
calls = self.connection.get_pending_rpcs()["remoteProcedureCalls"]
LOGGER.debug("Got pending rpcs: %r", calls)
for raw_call in calls:
call = create_remote_call(raw_call)
if call is None:
continue
self._add_pending_call(call)
def _send_rpc_callback_result(
self, call_id, remote_call_result, start_time, end_time
):
# Send the result to the backend
self.connection.send_rpc_result(
call_id, remote_call_result, start_time, end_time
)
def create_symlink(self, project_name):
"""
creates a symlink for this experiment in another project.
The experiment will now be displayed in the project provided and the original project.
Args:
            project_name: String. Represents the project name. The project must already exist.
"""
try:
if self.alive:
self.connection.send_new_symlink(project_name)
except Exception:
LOGGER.warning(ADD_SYMLINK_ERROR, project_name, exc_info=True)
def add_tag(self, tag):
"""
Add a tag to the experiment. Tags will be shown in the dashboard.
Args:
tag: String. A tag to add to the experiment.
"""
try:
if self.alive:
self.connection.add_tags([tag])
super(Experiment, self).add_tag(tag)
except Exception:
LOGGER.warning(ADD_TAGS_ERROR, tag, exc_info=True)
def add_tags(self, tags):
"""
Add several tags to the experiment. Tags will be shown in the
dashboard.
Args:
            tags: List<String>. A list of tags to add to the experiment.
"""
try:
if self.alive:
self.connection.add_tags(tags)
# If we successfully send them to the backend, save them locally
super(Experiment, self).add_tags(tags)
except Exception:
LOGGER.warning(ADD_TAGS_ERROR, tags, exc_info=True)
def register_callback(self, remote_action):
"""
Register the remote_action passed as argument to be a RPC.
Args:
remote_action: Callable.
"""
super(Experiment, self).register_callback(remote_action)
try:
remote_action_definition = get_remote_action_definition(remote_action)
except BadCallbackArguments as exc:
# Don't keep bad callbacks registered
self.unregister_callback(remote_action)
LOGGER.warning(str(exc), exc_info=True)
return
try:
self._register_callback_remotely(remote_action_definition)
except Exception:
# Don't keep bad callbacks registered
self.unregister_callback(remote_action)
LOGGER.warning(
REGISTER_RPC_FAILED, remote_action_definition["functionName"]
)
def _register_callback_remotely(self, remote_action_definition):
self.connection.register_rpc(remote_action_definition)
def send_notification(self, title, status=None, additional_data=None):
# type: (str, Optional[str], Optional[Dict[str, Any]]) -> None
"""
Send yourself a notification through email when an experiment
ends.
Args:
title: str - the email subject.
status: str - the final status of the experiment. Typically,
something like "finished", "completed" or "aborted".
additional_data: dict - a dictionary of key/values to notify.
Note:
In order to receive the notification, you need to have turned
on Notifications in your Settings in the Comet user interface.
If you wish to have the `additional_data` saved with the
experiment, you should also call `Experiment.log_other()` with
this data as well.
This method uses the email address associated with your account.
"""
try:
name = self.others.get("Name")
if additional_data is None:
additional_data = {}
self.connection.send_notification(
title,
status,
name,
self._get_experiment_url(),
additional_data,
custom_encoder=NestedEncoder,
)
except Exception:
LOGGER.error(SEND_NOTIFICATION_FAILED, exc_info=True)
def log_embedding(
self,
vectors,
labels,
image_data=None,
image_size=None,
image_preprocess_function=None,
image_transparent_color=None,
image_background_color_function=None,
title="Comet Embedding",
template_filename="template_projector_config.json",
group=None,
):
"""
Log a multi-dimensional dataset and metadata for viewing with
Comet's Embedding Projector (experimental).
Args:
vectors: the tensors to visualize in 3D
labels: labels for each tensor
image_data: (optional) list of arrays or Images
image_size: (optional, required if image_data is given) the size of each image
image_preprocess_function: (optional) if image_data is an
array, apply this function to each element first
image_transparent_color: a (red, green, blue) tuple
image_background_color_function: a function that takes an
index, and returns a (red, green, blue) color tuple
title: (optional) name of tensor
template_filename: (optional) name of template JSON file
See also: `Experiment._log_embedding_list()` and `comet_ml.Embedding`
Example:
```python
from comet_ml import Experiment
import numpy as np
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
def label_to_color(index):
label = y_test[index]
if label == 0:
return (255, 0, 0)
elif label == 1:
return (0, 255, 0)
elif label == 2:
return (0, 0, 255)
elif label == 3:
return (255, 255, 0)
elif label == 4:
return (0, 255, 255)
elif label == 5:
return (128, 128, 0)
elif label == 6:
return (0, 128, 128)
elif label == 7:
return (128, 0, 128)
elif label == 8:
return (255, 0, 255)
elif label == 9:
return (255, 255, 255)
experiment = Experiment(project_name="projector-embedding")
experiment.log_embedding(
vectors=x_test,
labels=y_test,
image_data=x_test,
image_preprocess_function=lambda matrix: np.round(matrix/255,0) * 2,
image_transparent_color=(0, 0, 0),
image_size=(28, 28),
image_background_color_function=label_to_color,
)
```
"""
if not self.alive:
return None
LOGGER.warning(
"Logging embedding is experimental - the API and logged data are subject to change"
)
embedding = self._create_embedding(
vectors,
labels,
image_data,
image_size,
image_preprocess_function,
image_transparent_color,
image_background_color_function,
title,
)
if group is not None:
self._embedding_groups[group].append(embedding)
return embedding
else:
# Log the template:
template = {"embeddings": [embedding.to_json()]}
return self._log_asset_data(
template, template_filename, asset_type="embeddings"
)
class ExistingExperiment(Experiment):
"""Existing Experiment allows you to report information to an
experiment that already exists on comet.ml and is not currently
    running. This is useful when your training and testing happen in
different scripts.
For example:
train.py:
```
exp = Experiment(api_key="my-key")
score = train_model()
exp.log_metric("train accuracy", score)
```
Now obtain the experiment key from comet.ml. If it's not visible
on your experiment table you can click `Customize` and add it as a
column.
test.py:
```
exp = ExistingExperiment(api_key="my-key",
previous_experiment="your experiment key from comet.ml")
score = test_model()
exp.log_metric("test accuracy", score)
```
Alternatively, you can pass the api_key via an environment
variable named `COMET_API_KEY` and the previous experiment id via
an environment variable named `COMET_EXPERIMENT_KEY` and omit them
from the ExistingExperiment constructor:
```
exp = ExistingExperiment()
score = test_model()
exp.log_metric("test accuracy", score)
```
"""
def __init__(self, api_key=None, previous_experiment=None, **kwargs):
"""
Append to an existing experiment on the Comet.ml frontend.
Args:
api_key: Your API key obtained from comet.ml
previous_experiment: Optional. Your experiment key from comet.ml, could be set through
configuration as well.
            project_name: Optional. Send your experiment to a specific project. Otherwise it will be sent to `Uncategorized Experiments`.
                If the project name does not already exist, Comet.ml will create a new project.
workspace: Optional. Attach an experiment to a project that belongs to this workspace
log_code: Default(False) - allows you to enable/disable code logging
log_graph: Default(False) - allows you to enable/disable automatic computation graph logging.
auto_param_logging: Default(True) - allows you to enable/disable hyper parameters logging
auto_metric_logging: Default(True) - allows you to enable/disable metrics logging
auto_metric_step_rate: Default(10) - controls how often batch metrics are logged
auto_histogram_tensorboard_logging: Default(False) - allows you to enable/disable automatic tensorboard histogram logging
auto_histogram_epoch_rate: Default(1) - controls how often histograms are logged
auto_histogram_weight_logging: Default(False) - allows you to enable/disable automatic histogram logging of biases and weights
auto_histogram_gradient_logging: Default(False) - allows you to enable/disable automatic histogram logging of gradients
auto_histogram_activation_logging: Default(False) - allows you to enable/disable automatic histogram logging of activations
auto_output_logging: Default("default") - allows you to select
which output logging mode to use. You can pass `"native"`
which will log all output even when it originated from a C
native library. You can also pass `"simple"` which will work
only for output made by Python code. If you want to disable
automatic output logging, you can pass `False`. The default is
`"default"` which will detect your environment and deactivate
the output logging for IPython and Jupyter environment and
sets `"native"` in the other cases.
auto_log_co2: Default(True) - automatically tracks the CO2 emission of
this experiment if `codecarbon` package is installed in the environment
parse_args: Default(False) - allows you to enable/disable automatic parsing of CLI arguments
log_env_details: Default(False) - log various environment
information in order to identify where the script is running
log_env_gpu: Default(False) - allow you to enable/disable the
automatic collection of gpu details and metrics (utilization, memory usage etc..).
`log_env_details` must also be true.
log_env_cpu: Default(False) - allow you to enable/disable the
automatic collection of cpu details and metrics (utilization, memory usage etc..).
`log_env_details` must also be true.
log_env_host: Default(False) - allow you to enable/disable the
automatic collection of host information (ip, hostname, python version, user etc...).
`log_env_details` must also be true.
log_git_metadata: Default(False) - allow you to enable/disable the
automatic collection of git details
log_git_patch: Default(False) - allow you to enable/disable the
automatic collection of git patch
display_summary_level: Default(1) - control the summary detail that is
displayed on the console at end of experiment. If 0, the summary
notification is still sent. Valid values are 0 to 2.
disabled: Default(False) - allows you to disable all network
communication with the Comet.ml backend. It is useful when you
                just need to work on your machine-learning scripts and need to
                relaunch them several times in a row.
Note: ExistingExperiment does not alter nor destroy previously
logged information. To override or add to previous information
you will have to set the appropriate following parameters to True:
* log_code
* log_graph
* parse_args
* log_env_details
* log_git_metadata
* log_git_patch
* log_env_gpu
* log_env_cpu
* log_env_host
For example, to continue to collect GPU information in an
`ExistingExperiment` you will need to override these parameters:
```python
>>> experiment = ExistingExperiment(
... log_env_details=True,
... log_env_gpu=True)
```
"""
# Validate the previous experiment id
self.config = get_config()
self.previous_experiment = get_previous_experiment(
previous_experiment, self.config
)
if not is_valid_experiment_key(self.previous_experiment):
raise ValueError("Invalid experiment key: %s" % self.previous_experiment)
# TODO: Document the parameter
self.step_copy = kwargs.pop("step_copy", None)
## Defaults for ExistingExperiment:
## For now, don't destroy previous Experiment information by default:
for (key, config_name, default) in [
("log_code", "comet.auto_log.code", False),
("log_graph", "comet.auto_log.graph", False),
("parse_args", "comet.auto_log.cli_arguments", False),
("log_env_details", "comet.auto_log.env_details", False),
("log_git_metadata", "comet.auto_log.git_metadata", False),
("log_git_patch", "comet.auto_log.git_patch", False),
("log_env_gpu", "comet.auto_log.env_gpu", False),
("log_env_cpu", "comet.auto_log.env_cpu", False),
("log_env_host", "comet.auto_log.env_host", False),
]:
if key not in kwargs or kwargs[key] is None:
kwargs[key] = self.config.get_bool(
None, config_name, default, not_set_value=None
)
super(ExistingExperiment, self).__init__(api_key, **kwargs)
def _get_experiment_key(self):
if self.step_copy is None:
return self.previous_experiment
else:
return generate_guid()
def _authenticate(self):
"""
Do the handshake with the Backend to authenticate the api key and get
various parameters and settings
"""
# Get an id for this run
try:
if self.step_copy is None:
run_id_response = self.connection.get_old_run_id(
self.previous_experiment
)
else:
run_id_response = self.connection.copy_run(
self.previous_experiment, self.step_copy
)
(
self.run_id,
full_ws_url,
self.project_id,
self.is_github,
self.focus_link,
self.upload_limit,
self.asset_upload_limit,
feature_toggles,
initial_offset,
self.upload_web_asset_url_prefix,
self.upload_web_image_url_prefix,
self.upload_api_asset_url_prefix,
self.upload_api_image_url_prefix,
) = run_id_response
self.feature_toggles = FeatureToggles(feature_toggles, self.config)
return (full_ws_url, initial_offset)
except InvalidAPIKey as e:
LOGGER.error(INVALID_API_KEY, e.api_key, exc_info=True)
return
except ValueError:
LOGGER.error(INTERNET_CONNECTION_ERROR, exc_info=True)
return
def send_notification(self, *args, **kwargs):
"""
        With an `Experiment`, this method will send you a notification
through email when an experiment ends. However, with an
`ExistingExperiment` this method does nothing.
"""
pass
def get_name(self):
"""
This functionality is not available in ExistingExperiment.
Use APIExperiment() instead.
"""
raise NotImplementedError(
"get_name() is not available in ExistingExperiment; use APIExperiment instead"
)
```
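
The constructor flags above (`auto_metric_logging`, the `auto_histogram_*_logging` family, etc.) are what the Keras callback and the `loggers/keras_logger.py` patching shown below consult. The following is a minimal end-to-end sketch, assuming `COMET_API_KEY` is set and using an illustrative MNIST model; `comet_ml` is imported before TensorFlow so the import-hook patching can wrap `Model.fit`:

```python
from comet_ml import Experiment  # import before keras/tensorflow
import tensorflow as tf

experiment = Experiment(
    project_name="mnist-demo",
    auto_metric_logging=True,
    auto_histogram_weight_logging=True,
)

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
# The patched Model.fit (see loggers/keras_logger.py below) appends the Comet
# KerasCallback automatically, so metrics and weight histograms are logged here.
model.fit(x_train / 255.0, y_train, epochs=1, batch_size=128)
```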
#### File: site-packages/comet_ml/_jupyter.py
```python
def _in_jupyter_environment():
# type: () -> bool
"""
Check to see if code is running in a Jupyter environment,
including jupyter notebook, lab, or console.
"""
try:
import IPython
except Exception:
return False
ipy = IPython.get_ipython()
if ipy is None or not hasattr(ipy, "kernel"):
return False
else:
return True
def _in_ipython_environment():
# type: () -> bool
"""
Check to see if code is running in an IPython environment.
"""
try:
import IPython
except Exception:
return False
ipy = IPython.get_ipython()
if ipy is None:
return False
else:
return True
def display_or_open_browser(url, clear=False, wait=True, new=0, autoraise=True):
# type: (str, bool, bool, int, bool) -> None
if _in_jupyter_environment():
from IPython.display import display, IFrame, clear_output
if clear:
clear_output(wait=wait)
display(IFrame(src=url, width="100%", height="800px"))
else:
import webbrowser
webbrowser.open(url, new=new, autoraise=autoraise)
```
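
A short usage sketch of `display_or_open_browser`; it is a private helper, and the URL here is illustrative (in practice the library passes something like `experiment.url`):

```python
from comet_ml._jupyter import display_or_open_browser

# In a Jupyter kernel this embeds an IFrame (optionally clearing previous
# output); anywhere else it falls back to webbrowser.open().
display_or_open_browser(
    "https://www.comet.ml/workspace/project/1234567890",
    clear=True,
)
```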
#### File: comet_ml/loggers/keras_logger.py
```python
import logging
from .._logging import GET_CALLBACK_FAILURE, check_module
from .._typing import Any, Dict
from . import tensorboard_logger
LOGGER = logging.getLogger(__name__)
def fit_keras_logger(experiment, original, *args, **kwargs):
return fit_logger("keras", "keras", experiment, original, args, kwargs)
def fit_tf_logger(experiment, original, *args, **kwargs):
return fit_logger(
"tf-keras", "TensorFlow Keras", experiment, original, args, kwargs
)
def get_callback_kwargs(args, kwargs):
# type: (Any, Any) -> Dict[Any, Any]
try:
import tensorflow
except ImportError:
return {}
# Find X
if len(args) >= 2:
x = args[1]
elif "x" in kwargs:
x = kwargs["x"]
else:
LOGGER.debug("No x argument found")
return {}
if isinstance(x, tensorflow.data.Dataset):
LOGGER.warning(
"tensorflow datasets are not currently supported for gradient and activation auto-logging"
)
return {}
# Find Y
if len(args) >= 3:
y = args[2]
elif "y" in kwargs:
y = kwargs["y"]
else:
LOGGER.debug("No y argument found")
return {}
return {"inputs": x, "targets": y}
def fit_logger(keras_type, framework, experiment, original, args, kwargs):
if not experiment.disabled_monkey_patching:
try:
callback_kwargs = get_callback_kwargs(args, kwargs)
callback = experiment.get_callback(keras_type, **callback_kwargs)
except Exception:
LOGGER.warning(GET_CALLBACK_FAILURE, framework, exc_info=True)
return
if "callbacks" in kwargs and kwargs["callbacks"] is not None:
callbacks = kwargs["callbacks"]
# Only append the callback if it's not there.
if not any(
x.__class__.__name__ == callback.__class__.__name__ for x in callbacks
):
LOGGER.debug("adding %r logger", framework)
callbacks.append(callback)
else:
LOGGER.debug("not adding %r logger", framework)
else:
kwargs["callbacks"] = [callback]
LOGGER.debug("tensorboard metric logging disabled by %r logger", framework)
# Disable tensorboard metric logging as it conflicts with keras:
tensorboard_logger.LOG_METRICS = False
tensorboard_logger.LOG_HISTOGRAMS = False
LOGGER.debug("New keras arguments %r %r", args, kwargs)
return args, kwargs
def multi_gpu_model_wrapper(experiment, original, model, result, *args, **kwargs):
try:
experiment._storage["keras"]["json_model"][
"gpu_model_%s" % id(model)
] = model.to_json(sort_keys=True)
except Exception:
experiment._log_once_at_level(
logging.DEBUG, "Failed to saved multi-GPU model", exc_info=True
)
def patch(module_finder):
check_module("keras")
check_module("tensorflow")
module_finder.register_before("keras.models", "Model.fit", fit_keras_logger)
module_finder.register_before(
"keras.models", "Model.fit_generator", fit_keras_logger
)
module_finder.register_before(
"tensorflow.python.keras.models", "Model.fit", fit_tf_logger
)
module_finder.register_before(
"tensorflow.python.keras.models", "Model.fit_generator", fit_tf_logger
)
module_finder.register_after(
"keras.utils.training_utils", "multi_gpu_model", multi_gpu_model_wrapper
)
check_module("keras")
check_module("tensorflow")
```
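
`fit_logger` only appends the Comet callback when one of the same class is not already present in `callbacks`. When auto-patching is disabled, the equivalent manual path is to build the callback yourself via `experiment.get_callback(...)`, mirroring the `keras_type` strings used above; a hedged sketch with illustrative data and names:

```python
import numpy as np
from comet_ml import Experiment
from tensorflow import keras

experiment = Experiment(project_name="manual-callback-demo")
comet_cb = experiment.get_callback("tf-keras")  # same string fit_tf_logger passes

x = np.random.rand(32, 4).astype("float32")
y = np.random.rand(32, 1).astype("float32")

model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="sgd", loss="mse")
model.fit(x, y, epochs=2, callbacks=[comet_cb])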
#### File: comet_ml/loggers/shap_logger.py
```python
import logging
from .._logging import check_module
from .._typing import Any
from ..experiment import BaseExperiment
LOGGER = logging.getLogger(__name__)
class SHAPLogger(object):
def __init__(self, title):
self.show = True
self.title = title
def before(self, experiment, original, *args, **kwargs):
"""
Little wrapper to make sure show is False
"""
self.show = kwargs.get("show", True)
kwargs["show"] = False
return (args, kwargs)
def after(self, experiment, original, results, *args, **kwargs):
# type: (BaseExperiment, Any, Any, Any, Any) -> None
"""
Little wrapper to log figure, and show, if needed.
"""
        # The post callback shouldn't run if the original call raised an exception,
        # so matplotlib should be importable here, unless some future shap analysis
        # no longer needs matplotlib
try:
import matplotlib.pyplot as plt
except ImportError:
LOGGER.warning("matplotlib not installed; shap logging disabled")
return
if experiment.config["comet.auto_log.figures"]:
experiment._storage["shap"]["counter"] += 1
experiment._log_figure(
figure_name="shap-%s-%s"
% (self.title, experiment._storage["shap"]["counter"]),
figure_type="shap",
framework="shap",
)
if self.show:
plt.show()
def patch(module_finder):
check_module("shap")
modules = [
("shap.plots._bar", "bar", "bar"),
("shap.plots._bar", "bar_legacy", "bar_plot"),
("shap.plots._image", "image", "image_plot"),
("shap.plots._beeswarm", "beeswarm", "beeswarm"),
("shap.plots._beeswarm", "summary_legacy", "summary_plot"),
("shap.plots._decision", "decision", "decision_plot"),
("shap.plots._decision", "multioutput_decision", "multioutput_decision_plot"),
("shap.plots._embedding", "embedding", "embedding_plot"),
("shap.plots._force", "force", "force_plot"),
("shap.plots._group_difference", "group_difference", "group_difference_plot"),
("shap.plots._heatmap", "heatmap", "heatmap"),
("shap.plots._scatter", "scatter", "scatter"),
("shap.plots._scatter", "dependence_legacy", "dependence_plot"),
("shap.plots._monitoring", "monitoring", "monitoring_plot"),
(
"shap.plots._partial_dependence",
"partial_dependence",
"partial_dependence_plot",
),
("shap.plots._violin", "violin", "violin"),
("shap.plots._waterfall", "waterfall", "waterfall_plot"),
]
for module, function, title in modules:
shap_logger = SHAPLogger(title)
module_finder.register_before(module, function, shap_logger.before)
module_finder.register_after(module, function, shap_logger.after)
check_module("shap")
```
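
The before/after hooks force `show=False`, log the current matplotlib figure under `shap-<title>-<counter>`, and then call `plt.show()` only if the caller asked for it. A sketch of what that means from user code; the model, data, and project name are illustrative assumptions:

```python
import numpy as np
import shap
import xgboost
from comet_ml import Experiment

experiment = Experiment(project_name="shap-demo")

# Synthetic regression data purely for illustration.
X = np.random.rand(200, 5)
y = X @ np.array([1.0, -2.0, 0.5, 0.0, 3.0])

model = xgboost.XGBRegressor(n_estimators=10).fit(X, y)
explainer = shap.Explainer(model)
shap_values = explainer(X[:50])

# shap.plots.bar is one of the functions hooked in patch(); the after-hook
# logs the figure to Comet (e.g. as "shap-bar-1") and still shows the plot.
shap.plots.bar(shap_values)
```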
#### File: site-packages/comet_ml/rpc.py
```python
import collections
import inspect
import logging
import traceback
from comet_ml.exceptions import BadCallbackArguments
import six
from ._typing import Any, Optional
LOGGER = logging.getLogger(__name__)
RemoteCall = collections.namedtuple(
"RemoteCall", "callId userName functionName cometDefined arguments createdAt"
)
def create_remote_call(call):
# type: (Any) -> Optional[RemoteCall]
""" Convert the backend response body to a RPCCall object
"""
    # The Backend currently sends an ExperimentKey field that is unnecessary
    # for us; drop it
call.pop("experimentKey", None)
call.pop("projectId", None)
new_arguments = {}
# Transform the arguments into a dict
for arg in call.pop("arguments", []):
new_arguments[arg["name"]] = arg["value"]
try:
return RemoteCall(arguments=new_arguments, **call)
except Exception:
LOGGER.debug("Error converting RPC payload", exc_info=True)
return None
def get_remote_action_definition(function):
if six.PY2:
argspec = inspect.getargspec(function)
arguments = argspec.args
# Check that the function accept a keyword argument named experiment
if "experiment" not in arguments and argspec.keywords is None:
raise BadCallbackArguments(function)
if "experiment" in arguments:
arguments.remove("experiment")
elif six.PY3:
argspec = inspect.getfullargspec(function)
# Check that the function accept a keyword argument named experiment
if (
"experiment" not in argspec.args
and argspec.varkw is None
and "experiment" not in argspec.kwonlyargs
):
raise BadCallbackArguments(function)
# It is forbidden to declare an argument name both as a positional and
# keyword-only argument, so we shouldn't get duplicates names
arguments = argspec.args + argspec.kwonlyargs
if "experiment" in arguments:
arguments.remove("experiment")
return {
"functionName": function.__name__,
"functionDocumentation": function.__doc__ or "",
"argumentNames": arguments,
}
def call_remote_function(function, experiment, rpc_call):
try:
result = function(experiment=experiment, **rpc_call.arguments)
return {"success": True, "result": convert_result_to_string(result)}
except Exception as e:
LOGGER.debug(
"Error calling %r with %r", function, rpc_call.arguments, exc_info=True
)
return {
"success": False,
"error_traceback": traceback.format_exc(),
"error": str(e),
}
def convert_result_to_string(remote_call_result):
try:
return str(remote_call_result)
except Exception:
try:
LOGGER.debug("Error casting as a string", exc_info=True)
return repr(remote_call_result)
except Exception:
LOGGER.debug("Error casting with repr", exc_info=True)
# Really nasty object, we need to be extra careful here
result_class = remote_call_result.__class__
result_dict = remote_call_result.__dict__
return "Instance of class %r with dict %s" % (result_class, result_dict)
```
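
`get_remote_action_definition` rejects callbacks that cannot receive an `experiment` keyword argument (raising `BadCallbackArguments`), and `call_remote_function` later injects that argument along with whatever the backend sent. A minimal sketch of a callback that passes the check, registered through the `Experiment.register_callback` method shown earlier; the function name and logged key are illustrative:

```python
from comet_ml import Experiment

experiment = Experiment(project_name="rpc-demo")

def set_learning_rate(experiment, lr):
    """Remote action: record a requested learning rate on the experiment."""
    # `experiment` is injected by call_remote_function; `lr` arrives as a
    # value taken from the RPC's "arguments" payload.
    experiment.log_other("requested_lr", lr)
    return "learning rate set to %s" % lr

# Accepted because `experiment` appears in the signature.
experiment.register_callback(set_learning_rate)
```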
#### File: site-packages/comet_ml/summary.py
```python
import logging
from collections import defaultdict
from ._typing import Any, Dict, List, Optional
from .utils import format_bytes, log_once_at_level
LOGGER = logging.getLogger(__name__)
class Summary(object):
def __init__(self, experiment_class_name):
# type: (str) -> None
self.experiment_class_name = experiment_class_name
self.topics = {
"data": SummaryTopic("Data"),
"uploads": SummaryTopic("Uploads"),
"others": SummaryTopic("Others"),
"parameters": SummaryTopic("Parameters"),
"metrics": SummaryTopic("Metrics", minmax=True),
"system-info": SummaryTopic("System Information"),
}
def set(self, topic, name, value, framework=None):
# type: (str, str, Any, Optional[str]) -> None
if topic in self.topics:
self.topics[topic].set(name, value, framework=framework)
else:
LOGGER.error("no such summary topic: %r", topic)
def get(self, topic, key):
# type: (str, str) -> Any
if topic in self.topics:
if key in self.topics[topic].details:
return self.topics[topic].details[key]
return None
def increment_section(self, topic, section, size=None, framework=None):
# type: (str, str, Optional[int], Optional[str]) -> None
if topic in self.topics:
self.topics[topic].increment_section(
section, size=size, framework=framework
)
else:
LOGGER.error("no such summary topic: %r", topic)
def generate_summary(self, display_summary_level):
# type: (int) -> Dict[str, Any]
"""
        Generate the summary and optionally display it.
        Args:
            display_summary_level: the level of detail to display.
        The returned dictionary is of the form:
{
'Data': {
'url': 'https://comet.ml/workspace/project/2637237464736'
},
'Metrics [count] (min, max)': {
'sys.cpu.percent.01': '2.9',
'sys.cpu.percent.avg': '9.575000000000001',
'sys.load.avg': '0.58',
'sys.ram.total': '16522285056.0',
'sys.ram.used': '13996814336.0',
'train_acc [10]': '(0.1639556496115957, 0.9755067788284781)',
'train_loss [10]': '(0.02660752389019383, 0.9435748153289714)',
'validate_acc': '0.820739646603997',
'validate_loss': '0.7258299466381112'},
'Others': {
'Name': 'my experiment'
},
'Uploads': {
'asset': '2 (2 MB)',
'git-patch': '1'
}
}
"""
summary = {}
for topic_key in self.topics:
topic_summary = self.topics[topic_key]
topic_name = topic_summary.name
details = {} # type: Dict[str, Any]
count = 0
minmax = 0
empty = True
for key in topic_summary.details:
frameworks = topic_summary.get_frameworks(key)
if "comet" in frameworks:
# at least one was logged by the system framework
if display_summary_level < 2:
# don't show system logged items like cpu, gpu, etc.
continue
empty = False
detail_name = key
key_summary = topic_summary.details[key]
if key_summary["count"] > 1:
detail_name += " [%s]" % key_summary["count"]
count += 1
if (
key_summary["min"] != float("inf")
and key_summary["max"] != float("-inf")
and (key_summary["min"] != key_summary["max"])
):
minmax += 1
details[detail_name] = (key_summary["min"], key_summary["max"])
else:
if key_summary["value"]:
details[detail_name] = key_summary["value"]
else: # counts, and maybe size
if key_summary["size"]:
details[detail_name] = "%s (%s)" % (
key_summary["count"],
format_bytes(key_summary["size"]),
)
else:
details[detail_name] = key_summary["count"]
if not empty:
if count > 0:
if minmax > 0:
topic_description = "%s [count] (min, max)" % topic_name
else:
topic_description = "%s [count]" % topic_name
else:
topic_description = "%s" % topic_name
summary[topic_description] = details
if display_summary_level > 0:
title = "Comet.ml %s Summary" % self.experiment_class_name
LOGGER.info("-" * len(title))
LOGGER.info(title)
LOGGER.info("-" * len(title))
for topic in sorted(summary):
# Show description
LOGGER.info(" %s:", topic)
# First, find maximum size of description:
max_size = 0
for desc in summary[topic]:
max_size = max(max_size, len(desc) + 1)
for desc in sorted(summary[topic]):
value = summary[topic][desc]
LOGGER.info(" %-" + str(max_size) + "s: %s", desc, value)
LOGGER.info("-" * len(title))
return summary
class SummaryTopic(object):
def __init__(self, name, minmax=False):
# type: (str, bool) -> None
self.name = name
self.minmax = minmax
def default():
default_value = {
"value": None,
"min": float("inf"),
"max": float("-inf"),
"count": 0,
"size": 0,
"frameworks": [],
}
return default_value
self.details = defaultdict(default)
def get_frameworks(self, section):
# type: (str) -> List
return self.details[section]["frameworks"]
def append_framework(self, section, framework):
# type: (str, str) -> None
self.get_frameworks(section).append(framework)
def set(self, key, value, framework=None):
# type: (str, Any, Optional[str]) -> None
self.details[key]["value"] = value
self.increment_section(key, framework=framework)
if self.minmax:
try:
min_value = self.details[key]["min"]
self.details[key]["min"] = min(min_value, value)
max_value = self.details[key]["max"]
self.details[key]["max"] = max(max_value, value)
except Exception:
log_once_at_level(
logging.DEBUG, "summary of %r cannot get min, max values" % key
)
def increment_section(self, section, size=None, framework=None):
# type: (str, Optional[int], Optional[str]) -> None
self.details[section]["count"] += 1
if size:
self.details[section]["size"] += size
if framework:
self.append_framework(section, framework)
```
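
A standalone sketch of how these pieces fit together: `set` records values (tracking min/max for the `metrics` topic), `increment_section` counts uploads with optional sizes, and `generate_summary` prints and returns the report. Inside the library the experiment drives this; the values below are illustrative:

```python
from comet_ml.summary import Summary

summary = Summary("Experiment")

# "metrics" was created with minmax=True, so repeated sets track (min, max).
for loss in [0.9, 0.5, 0.3]:
    summary.set("metrics", "train_loss", loss, framework="keras")
summary.set("others", "Name", "my experiment")
summary.increment_section("uploads", "asset", size=2 * 1024 * 1024)

# Logs the boxed "Comet.ml Experiment Summary" and returns something like:
# {"Metrics [count] (min, max)": {"train_loss [3]": (0.3, 0.9)},
#  "Others": {"Name": "my experiment"}, "Uploads": {"asset": "1 (2 MB)"}}
report = summary.generate_summary(1)
```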
#### File: site-packages/comet_ml/tf_utils.py
```python
import tensorflow as tf
from ._typing import Any
@tf.function
def get_gradients(model, batch_input, batch_target, weights):
# type: (Any, Any, Any, Any) -> Any
"""
Function to compute gradients of the weights wrt the loss.
"""
with tf.GradientTape() as tape:
output = model(batch_input)
loss_function = tf.keras.losses.get(model.loss)
loss = loss_function(output, batch_target)
gradients = tape.gradient(loss, weights)
return gradients
```
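
`get_gradients` is what the histogram-logging path uses to turn one batch into per-weight gradients: it runs the model under a `GradientTape`, resolves the compiled loss with `tf.keras.losses.get(model.loss)`, and differentiates with respect to the given weights. A hedged standalone sketch with an illustrative tiny model and random data:

```python
import numpy as np
import tensorflow as tf
from comet_ml.tf_utils import get_gradients

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(3,))])
model.compile(optimizer="sgd", loss="mse")  # model.loss must be resolvable

batch_input = np.random.rand(8, 3).astype("float32")
batch_target = np.random.rand(8, 1).astype("float32")

grads = get_gradients(model, batch_input, batch_target, model.trainable_weights)
for weight, grad in zip(model.trainable_weights, grads):
    print(weight.name, grad.shape)
```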
#### File: everett/ext/yamlfile.py
```python
import logging
import os
import yaml
from everett import ConfigurationError, NO_VALUE
from everett.manager import generate_uppercase_key, get_key_from_envs, listify
logger = logging.getLogger("everett")
class ConfigYamlEnv(object):
"""Source for pulling configuration from YAML files.
This requires optional dependencies. You can install them with::
$ pip install everett[yaml]
Takes a path or list of possible paths to look for a YAML file. It uses
the first YAML file it can find.
If it finds no YAML files in the possible paths, then this configuration
source will be a no-op.
This will expand ``~`` as well as work relative to the current working
directory.
This example looks just for the YAML file specified in the environment::
from everett.manager import ConfigManager
from everett.ext.yamlfile import ConfigYamlEnv
config = ConfigManager([
ConfigYamlEnv(os.environ.get('FOO_YAML'))
])
If there's no ``FOO_YAML`` in the environment, then the path will be
ignored.
Here's an example that looks for the YAML file specified in the environment
variable ``FOO_YAML`` and failing that will look for ``.antenna.yaml`` in
the user's home directory::
from everett.manager import ConfigManager
from everett.ext.yamlfile import ConfigYamlEnv
config = ConfigManager([
ConfigYamlEnv([
os.environ.get('FOO_YAML'),
'~/.antenna.yaml'
])
])
This example looks for a ``config/local.yaml`` file which overrides values
in a ``config/base.yaml`` file both are relative to the current working
directory::
from everett.manager import ConfigManager
from everett.ext.yamlfile import ConfigYamlEnv
config = ConfigManager([
ConfigYamlEnv('config/local.yaml'),
ConfigYamlEnv('config/base.yaml')
])
Note how you can have multiple ``ConfigYamlEnv`` files. This is how you
can set Everett up to have values in one YAML file override values in
another YAML file.
Everett looks for keys and values in YAML files. YAML files can be split
into multiple documents, but Everett only looks at the first one.
Keys are case-insensitive. You can do namespaces either in the key itself
using ``_`` as a separator or as nested mappings.
All values should be double-quoted.
Here's an example::
foo: "bar"
FOO2: "bar"
namespace_foo: "bar"
namespace:
namespace2:
foo: "bar"
Giving you these namespaced keys:
* ``FOO``
* ``FOO2``
* ``NAMESPACE_FOO``
    * ``NAMESPACE_NAMESPACE2_FOO``
"""
def __init__(self, possible_paths):
self.cfg = {}
self.path = None
possible_paths = listify(possible_paths)
for path in possible_paths:
if not path:
continue
path = os.path.abspath(os.path.expanduser(path.strip()))
if path and os.path.isfile(path):
self.path = path
self.cfg = self.parse_yaml_file(path)
break
if not self.path:
logger.debug("No YAML file found: %s", possible_paths)
def parse_yaml_file(self, path):
"""Parse yaml file at ``path`` and return a dict."""
with open(path, "r") as fp:
data = yaml.safe_load(fp)
if not data:
return {}
def traverse(namespace, d):
cfg = {}
for key, val in d.items():
if isinstance(val, dict):
cfg.update(traverse(namespace + [key], val))
elif isinstance(val, str):
cfg["_".join(namespace + [key]).upper()] = val
else:
# All values should be double-quoted strings so they
# parse as strings; anything else is a configuration
# error at parse-time
raise ConfigurationError(
"Invalid value %r in file %s: values must be double-quoted strings"
% (val, path)
)
return cfg
return traverse([], data)
def get(self, key, namespace=None):
"""Retrieve value for key."""
if not self.path:
return NO_VALUE
logger.debug("Searching %r for key: %s, namepsace: %s", self, key, namespace)
full_key = generate_uppercase_key(key, namespace)
return get_key_from_envs(self.cfg, full_key)
def __repr__(self):
return "<ConfigYamlEnv: %s>" % self.path
``` |
{
"source": "jingalls/phantomrestclient",
"score": 2
} |
#### File: phantomrestclient/phantom-rest-client/phantomAssets.py
```python
import os
from basePhantom import _BasePhantom
class _PhantomAssets(_BasePhantom):
"""
Wrapper around the Phantom REST calls for Assets
"""
def __init__(self, host, username, password):
_BasePhantom.__init__(self, host, username, password)
self.endpoint_url = os.path.join(self.host, "asset")
def get_asset(self, asset_id):
if asset_id is None:
raise ValueError("asset_id is required")
url = os.path.join(self.endpoint_url, asset_id)
return self.__make_rest_request__(url, "GET")
def get_all_assets(self):
url = self.endpoint_url + "?page_size=0"
return self.__make_rest_request__(url, "GET")
def create_asset(self, data):
raise NotImplementedError()
``` |
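
A minimal usage sketch; the host and credentials are placeholders, and `__make_rest_request__` comes from `_BasePhantom`, which is not shown here:

```python
from phantomAssets import _PhantomAssets

assets = _PhantomAssets(
    host="https://phantom.example.com/rest",
    username="admin",
    password="changeme",
)

# GET <host>/asset?page_size=0 returns every configured asset.
all_assets = assets.get_all_assets()

# GET <host>/asset/<id> returns a single asset. Pass the id as a string,
# since it is joined into the URL with os.path.join.
one_asset = assets.get_asset("42")
```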
{
"source": "JingangLang/Baidu_XJTU_2018_company_logo_detection",
"score": 2
} |
#### File: Baidu_XJTU_2018_company_logo_detection/utils/draw_result.py
```python
import os
import shutil
import colorsys
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import pandas as pd
from tqdm import tqdm
mapping_list = pd.read_csv('./class_name.csv', header=None, names=['abbr', 'fullname'])  # mapping from class index to class name
# randomly generate colors
hsv_tuples = [(x / 60, 1., 1.)
for x in range(60)] # 60 classes
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
def init():
if os.path.exists('../result_vis'):
for _, _, files in os.walk('../result_vis'):
for file in files:
os.remove('../result_vis/' + file)
os.rmdir('../result_vis')
os.mkdir('../result_vis')
for _, _, files in os.walk('../../data/test'):
for file in files:
shutil.copy('../../data/test/' + file, '../result_vis/' + file)
def plot_box(row):
image = Image.open('../result_vis/' + row['filename'])
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(font='../font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
# print(row['claz'], (row['x_min'], row['y_min']), (row['x_max'], row['y_max']))
for i in range(thickness):
draw.rectangle(
[row['x_min'] + i, row['y_min'] + i, row['x_max'] - i, row['y_max']- i],
outline=colors[int(row['claz'])-1])
label = '{} {:.2f}'.format(mapping_list.loc[row['claz'], 'abbr'], row['score'])
label_size = draw.textsize(label, font)
if row['y_min'] - label_size[1] >= 0:
text_origin = np.array([row['x_min'], row['y_min'] - label_size[1]])
else:
text_origin = np.array([row['x_min'], row['y_min'] + 1])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=colors[int(row['claz'])-1])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
image.save('../result_vis/' + row['filename'])
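# Note (added): plot_box consumes one detection row with the columns named in the
# read_csv call below -- filename, claz (class id), score, and the box corners
# x_min, y_min, x_max, y_max in pixels.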
init()
result_list = pd.read_csv('../result/960_0.1_0.6_YOLOv3.csv', sep=' ', header=None, index_col=False, names=['filename', 'claz', 'score', 'x_min', 'y_min', 'x_max', 'y_max'])
for i in tqdm(range(len(result_list))):
plot_box(result_list.iloc[i])
``` |
{
"source": "JingbinLiu/DRL",
"score": 2
} |
#### File: algos/sppox/sppox.py
```python
import numpy as np
import tensorflow as tf
import gym
import time
import spinup.algos.sppox.core as core
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_tf import MpiAdamOptimizer, sync_all_params
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
class PPOBuffer:
"""
A buffer for storing trajectories experienced by a PPO agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
"""
def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
self.adv_buf = np.zeros(size, dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.ret_buf = np.zeros(size, dtype=np.float32)
self.val_buf = np.zeros(size, dtype=np.float32)
self.logp_buf = np.zeros(size, dtype=np.float32)
self.gamma, self.lam = gamma, lam
self.ptr, self.path_start_idx, self.max_size = 0, 0, size
def store(self, obs, act, rew, val, logp):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
assert self.ptr < self.max_size # buffer has to have room so you can store
self.obs_buf[self.ptr] = obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.val_buf[self.ptr] = val
self.logp_buf[self.ptr] = logp
self.ptr += 1
def finish_path(self, last_val=0):
"""
Call this at the end of a trajectory, or when one gets cut off
by an epoch ending. This looks back in the buffer to where the
trajectory started, and uses rewards and value estimates from
the whole trajectory to compute advantage estimates with GAE-Lambda,
as well as compute the rewards-to-go for each state, to use as
the targets for the value function.
The "last_val" argument should be 0 if the trajectory ended
because the agent reached a terminal state (died), and otherwise
should be V(s_T), the value function estimated for the last state.
This allows us to bootstrap the reward-to-go calculation to account
for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
"""
path_slice = slice(self.path_start_idx, self.ptr)
rews = np.append(self.rew_buf[path_slice], last_val)
vals = np.append(self.val_buf[path_slice], last_val)
# the next two lines implement GAE-Lambda advantage calculation
deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
self.adv_buf[path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam) # + vals[:-1] # Adv + V = Q
# the next line computes rewards-to-go, to be targets for the value function
self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]
self.path_start_idx = self.ptr
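        # Worked numeric sketch (added, assumed values): with gamma=1.0, lam=1.0,
        # rews=[1, 1] stored, vals=[0.5, 0.5] and last_val=0, the deltas are [1.0, 0.5],
        # so adv = discount_cumsum([1.0, 0.5], 1.0) = [1.5, 0.5] and
        # ret = discount_cumsum([1, 1, 0], 1.0)[:-1] = [2.0, 1.0].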
def get(self):
"""
Call this at the end of an epoch to get all of the data from
the buffer, with advantages appropriately normalized (shifted to have
mean zero and std one). Also, resets some pointers in the buffer.
"""
assert self.ptr == self.max_size # buffer has to be full before you can get
self.ptr, self.path_start_idx = 0, 0
# the next two lines implement the advantage normalization trick
# adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)
# self.adv_buf = (self.adv_buf - adv_mean) / adv_std
return [self.obs_buf, self.act_buf, self.adv_buf,
self.ret_buf, self.logp_buf]
"""
Proximal Policy Optimization (by clipping),
with early stopping based on approximate KL
"""
def sppo(args, env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,
steps_per_epoch=4000, epochs=50, gamma=0.99, clip_ratio=0.2,
train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=200,
target_kl=0.01, logger_kwargs=dict(), save_freq=10):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` (batch, act_dim) | Samples actions from policy given
| states.
``logp`` (batch,) | Gives log probability, according to
| the policy, of taking actions ``a_ph``
| in states ``x_ph``.
``logp_pi`` (batch,) | Gives log probability, according to
| the policy, of the action sampled by
| ``pi``.
``v`` (batch,) | Gives the value estimate for states
| in ``x_ph``. (Critical: make sure
| to flatten this!)
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to PPO.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs of interaction (equivalent to
number of policy updates) to perform.
gamma (float): Discount factor. (Always between 0 and 1.)
clip_ratio (float): Hyperparameter for clipping in the policy objective.
Roughly: how far can the new policy go from the old policy while
still profiting (improving the objective function)? The new policy
can still go farther than the clip_ratio says, but it doesn't help
on the objective anymore. (Usually small, 0.1 to 0.3.)
        pi_lr (float): Learning rate for policy optimizer (read from ``args.pi_lr`` in this variant).
        vf_lr (float): Learning rate for value function optimizer (read from ``args.vf_lr`` in this variant).
train_pi_iters (int): Maximum number of gradient descent steps to take
on policy loss per epoch. (Early stopping may cause optimizer
to take fewer than this.)
train_v_iters (int): Number of gradient descent steps to take on
value function per epoch.
lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,
close to 1.)
max_ep_len (int): Maximum length of trajectory / episode / rollout.
target_kl (float): Roughly what KL divergence we think is appropriate
between new and old policies after an update. This will get used
for early stopping. (Usually small, 0.01 or 0.05.)
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
seed += 10000 * proc_id()
tf.set_random_seed(seed)
np.random.seed(seed)
env = env_fn()
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
# Inputs to computation graph
x_ph, a_ph = core.placeholders_from_spaces(env.observation_space, env.action_space)
adv_ph, ret_ph, logp_old_ph = core.placeholders(None, None, None)
# Main outputs from computation graph
pi, logp, logp_pi, h, v = actor_critic(x_ph, a_ph, **ac_kwargs)
# Need all placeholders in *this* order later (to zip with data from buffer)
all_phs = [x_ph, a_ph, adv_ph, ret_ph, logp_old_ph]
# Every step, get: action, value, and logprob
get_action_ops = [pi, v, logp_pi, h]
# Experience buffer
local_steps_per_epoch = int(steps_per_epoch / num_procs())
buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in ['pi', 'v'])
logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n'%var_counts)
# PPO objectives
ratio = tf.exp(logp - logp_old_ph) # pi(a|s) / pi_old(a|s)
# For PPO
# min_adv = tf.where(adv_ph > 0, (1 + clip_ratio) * adv_ph, (1 - clip_ratio) * adv_ph)
# pi_loss = -tf.reduce_mean(tf.minimum(ratio * adv_ph, min_adv))
# SPPO NO.2: add entropy
adv_logp = adv_ph - args.alpha * tf.stop_gradient(logp)
min_adv = tf.where(adv_logp>0, (1+clip_ratio)*adv_logp, (1-clip_ratio)*adv_logp)
pi_loss = -tf.reduce_mean(tf.minimum(ratio * adv_logp, min_adv))
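    # Note (added): the tf.where construction above is equivalent to the standard clipped surrogate
    # -E[min(ratio * A', clip(ratio, 1 - clip_ratio, 1 + clip_ratio) * A')], where
    # A' = adv - alpha * stop_gradient(logp) is the entropy-regularized advantage used by this SPPO variant.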
v_loss = tf.reduce_mean((ret_ph - v)**2)
# Info (useful to watch during learning)
approx_kl = tf.reduce_mean(logp_old_ph - logp) # a sample estimate for KL-divergence, easy to compute
# approx_ent = tf.reduce_mean(-logp) # a sample estimate for entropy, also easy to compute
approx_ent = tf.reduce_mean(-h) # exact entropy
clipped = tf.logical_or(ratio > (1+clip_ratio), ratio < (1-clip_ratio))
clipfrac = tf.reduce_mean(tf.cast(clipped, tf.float32))
# Optimizers
train_pi = MpiAdamOptimizer(learning_rate=args.pi_lr).minimize(pi_loss)
train_v = MpiAdamOptimizer(learning_rate=args.vf_lr).minimize(v_loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Sync params across processes
sess.run(sync_all_params())
# Setup model saving
logger.setup_tf_saver(sess, inputs={'x': x_ph}, outputs={'pi': pi, 'v': v})
def update():
inputs = {k:v for k,v in zip(all_phs, buf.get())}
pi_l_old, v_l_old, ent = sess.run([pi_loss, v_loss, approx_ent], feed_dict=inputs)
# Training
for i in range(train_pi_iters):
_, kl = sess.run([train_pi, approx_kl], feed_dict=inputs)
kl = mpi_avg(kl)
if kl > 1.5 * target_kl:
logger.log('Early stopping at step %d due to reaching max kl.'%i)
break
logger.store(StopIter=i)
for _ in range(train_v_iters):
sess.run(train_v, feed_dict=inputs)
# Log changes from update
pi_l_new, v_l_new, kl, cf = sess.run([pi_loss, v_loss, approx_kl, clipfrac], feed_dict=inputs)
logger.store(LossPi=pi_l_old, LossV=v_l_old,
KL=kl, Entropy=ent, ClipFrac=cf,
DeltaLossPi=(pi_l_new - pi_l_old),
DeltaLossV=(v_l_new - v_l_old))
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# Main loop: collect experience in env and update/log each epoch
for epoch in range(epochs):
for t in range(local_steps_per_epoch):
a, v_t, logp_t, h_t = sess.run(get_action_ops, feed_dict={x_ph: o.reshape(1,-1)})
# SPPO NO.1: add entropy
rh = r - args.alpha * logp_t
# rh = r - args.alpha * h_t # exact entropy
# save and log
buf.store(o, a, rh, v_t, logp_t)
logger.store(VVals=v_t)
o, r, d, _ = env.step(a[0])
ep_ret += r
ep_len += 1
# d = False if ep_len == max_ep_len else d
terminal = d or (ep_len == max_ep_len)
if terminal or (t==local_steps_per_epoch-1):
if not(terminal):
print('Warning: trajectory cut off by epoch at %d steps.'%ep_len)
# if trajectory didn't reach terminal state, bootstrap value target
last_val = r if d else sess.run(v, feed_dict={x_ph: o.reshape(1,-1)})
buf.finish_path(last_val)
if terminal:
# only save EpRet / EpLen if trajectory finished
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# # Save model
# if (epoch % save_freq == 0) or (epoch == epochs-1):
# logger.save_state({'env': env}, None)
# Perform PPO update!
update()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('TotalEnvInteracts', (epoch+1)*steps_per_epoch)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossV', average_only=True)
logger.log_tabular('DeltaLossPi', average_only=True)
logger.log_tabular('DeltaLossV', average_only=True)
logger.log_tabular('Entropy', average_only=True)
logger.log_tabular('KL', average_only=True)
logger.log_tabular('ClipFrac', average_only=True)
logger.log_tabular('StopIter', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='LunarLander-v2') # CartPole-v0 Acrobot-v1 Breakout-ram-v4 # 'LunarLanderContinuous-v2' 0.02 # LunarLander-v2 0.05
parser.add_argument('--max_ep_len', type=int, default=1000)
parser.add_argument('--hid', type=int, default=300)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--alpha', type=float, default=0.1)
parser.add_argument('--pi_lr', type=float, default=3e-4)
parser.add_argument('--vf_lr', type=float, default=1e-3)
parser.add_argument('--seed', '-s', type=int, default=3)
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--steps', type=int, default=4000)
parser.add_argument('--epochs', type=int, default=30000)
parser.add_argument('--exp_name', type=str, default='LunarLander-v2_apple_0.1_logp')
args = parser.parse_args()
mpi_fork(args.cpu) # run parallel code with mpi
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
class Wrapper(object):
def __init__(self, env, action_repeat=1):
self._env = env
self.action_repeat = action_repeat
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
r = 0.0
for _ in range(self.action_repeat):
obs_, reward_, done_, info_ = self._env.step(action)
reward_ = reward_ if reward_ > -99.0 else 0.0
r = r + reward_
if done_:
return obs_, r, done_, info_
return obs_, r, done_, info_
sppo(args, lambda : Wrapper(gym.make(args.env),1), actor_critic=core.mlp_actor_critic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l), gamma=args.gamma, max_ep_len=args.max_ep_len,
seed=args.seed, steps_per_epoch=args.steps, epochs=args.epochs,
logger_kwargs=logger_kwargs)
``` |
{
"source": "jingbowen/meiduo_001",
"score": 2
} |
#### File: celery_tasks/sms/tesks.py
```python
from celery_tasks.main import celery_app
# @celery_app.task()  # decorate the function below as a Celery task
# def send_sms_code(mobile, sms_code):
# (mobile number, [randomly generated verification code, validity period], template id)
# CCP().send_template_sms(mobile,[sms_code, 5], 1)
```
#### File: apps/users/views.py
```python
from django.shortcuts import render,redirect
from django.views import View
from django.http import HttpResponse,HttpResponseForbidden,JsonResponse
import re
from .models import User
from django.db import DatabaseError
from django.contrib.auth import login,authenticate
from meiduo_mell.utils.response_code import RETCODE
from django_redis import get_redis_connection
# Create your views here.
class RegisterView(View):
def get(self, request):
return render(request, "register.html")
def post(self,request):
username = request.POST.get("username")
password = request.POST.get("password")
password2 = request.POST.get("password2")
mobile = request.POST.get("mobile")
sms_code = request.POST.get("sms_code")
image_code = request.POST.get("image_code")
allow = request.POST.get("allow")
        if not all([username, password, password2, mobile, allow]):
return HttpResponseForbidden("缺少必要的参数")
if not re.match(r'^[A-Za-z0-9-_]{5,20}$', username):
return HttpResponseForbidden("请输入5-20个字符的用户名")
if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
return HttpResponseForbidden("请输入8-20位的密码")
if password != password2:
return HttpResponseForbidden("两次输入的密码不相等")
if not re.match(r'^1[3-9]\d{9}$', mobile):
return HttpResponseForbidden('请输入正确的手机号码')
if allow != "on":
return HttpResponseForbidden('请勾选用户协议')
redis_conn = get_redis_connection("verify_code")
sms_code_server = redis_conn.get("sms_%s" % mobile)
if sms_code_server is None:
return render(request, "register.html", {"sms_code_errmsg": "验证码无效"})
if sms_code != sms_code_server.decode():
return render(request, "register.html",{"sms_code_errmsg": "验证码错误"})
try:
User.objects.create_user(username=username, password=password, mobile=mobile)
except DatabaseError:
return render(request, 'register.html', {'register_errmsg': '注册失败'})
# login(request, username)
return redirect("/")
class UserNameContView(View):
def get(self,request,username,):
count = User.objects.filter(username=username).count()
return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', "count": count})
class MobileCountView(View):
def get(self,request,mobile):
count = User.objects.filter(mobile=mobile).count()
return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', "count": count})
class LoginView(View):
def get(self,request):
return render(request,"login.html")
def post(self,request):
username = request.POST.get("username")
password = request.POST.get("password")
remembered = request.POST.get("remembered")
        if not all([username, password]):
return HttpResponseForbidden("缺少必传的参数")
if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
return HttpResponseForbidden('请输入正确的用户名或手机号')
        # check that the password is 8-20 letters or digits
if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
return HttpResponseForbidden('密码最少8位,最长20位')
user = authenticate(username=username, password=password)
if user is None:
return render(request, "login.html", {"account_errmsg":"用户名或密码错误"})
login(request, user)
if remembered != "on":
request.session.set_expiry(0)
return redirect("/")
``` |
{
"source": "jingcao80/Elastos",
"score": 2
} |
#### File: addons/service.xbmc.versioncheck/service.py
```python
import os
import sys
import platform
import xbmc
import xbmcaddon
import xbmcgui
import xbmcvfs
if sys.version_info < (2, 7):
import simplejson
else:
import json as simplejson
__addon__ = xbmcaddon.Addon()
__addonversion__ = __addon__.getAddonInfo('version')
__addonname__ = __addon__.getAddonInfo('name')
__addonpath__ = __addon__.getAddonInfo('path').decode('utf-8')
__icon__ = __addon__.getAddonInfo('icon')
__localize__ = __addon__.getLocalizedString
def log(txt):
if isinstance (txt,str):
txt = txt.decode("utf-8")
message = u'%s: %s' % (__addonname__, txt)
xbmc.log(msg=message.encode("utf-8"), level=xbmc.LOGDEBUG)
class Main:
def __init__(self):
if __addon__.getSetting("versioncheck_enable") == 'true' and not xbmc.getCondVisibility('System.HasAddon(os.openelec.tv)'):
if not sys.argv[0]:
xbmc.executebuiltin('XBMC.AlarmClock(CheckAtBoot,XBMC.RunScript(service.xbmc.versioncheck, started),00:00:30,silent)')
xbmc.executebuiltin('XBMC.AlarmClock(CheckWhileRunning,XBMC.RunScript(service.xbmc.versioncheck, started),24:00:00,silent,loop)')
elif sys.argv[0] and sys.argv[1] == 'started':
if xbmc.getCondVisibility('System.Platform.Linux'):
oldversion = _versionchecklinux('xbmc')
else:
oldversion = _versioncheck()
if oldversion[0]:
_upgrademessage(oldversion[1])
else:
pass
def _versioncheck():
# initial vars
oldversion = False
msg = ''
# retrieve versionlists from supplied version file
version_file = os.path.join(__addonpath__, 'resources/versions.txt')
# Eden didn't have xbmcvfs.File()
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "11.9.3":
file = open(version_file, 'r')
else:
file = xbmcvfs.File(version_file)
data = file.read()
file.close()
version_query = unicode(data, 'utf-8', errors='ignore')
version_query = simplejson.loads(version_query)
    # Create separate version lists
versionlist_stable = version_query['releases']['stable']
versionlist_rc = version_query['releases']['releasecandidate']
versionlist_beta = version_query['releases']['beta']
versionlist_alpha = version_query['releases']['alpha']
versionlist_prealpha = version_query['releases']['prealpha']
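    # Assumed structure of versions.txt (illustrative, inferred from the lookups above):
    # {"releases": {"stable": [{"major": "13", "minor": "0", "revision": "...", "tag": "stable"}, ...],
    #               "releasecandidate": [...], "beta": [...], "alpha": [...], "prealpha": [...]}}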
# retrieve current installed version
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "method": "Application.GetProperties", "params": {"properties": ["version", "name"]}, "id": 1 }')
json_query = unicode(json_query, 'utf-8', errors='ignore')
json_query = simplejson.loads(json_query)
version_installed = []
if json_query.has_key('result') and json_query['result'].has_key('version'):
version_installed = json_query['result']['version']
log("Version installed %s" %version_installed)
# set oldversion flag to false
oldversion = False
### Check to upgrade to newest available stable version
# check on smaller major version. Smaller version than available always notify
if version_installed['major'] < int(versionlist_stable[0]['major']):
msg = __localize__(32003)
oldversion = True
log("Version available %s" %versionlist_stable[0])
# check on same major version installed and available
elif version_installed['major'] == int(versionlist_stable[0]['major']):
# check on smaller minor version
if version_installed['minor'] < int(versionlist_stable[0]['minor']):
msg = __localize__(32003)
oldversion = True
log("Version available %s" %versionlist_stable[0])
        # if the installed build is not a stable release, always notify
elif version_installed['tag'] != "stable":
msg = __localize__(32008)
oldversion = True
log("Version available %s" %versionlist_stable[0])
else:
log("Last available stable installed")
### Check to upgrade to newest available RC version if not installed stable
    ## Also check that oldversion hasn't been set True by a previous check; if it has, this needs to be skipped
if not oldversion and version_installed['tag'] != "stable":
# only check on equal or lower major because newer installed beta/alpha/prealpha version will be higher
if versionlist_rc and version_installed['major'] <= int(versionlist_rc[0]['major']):
if version_installed['revision'] <= versionlist_rc[0]['revision']:
msg = __localize__(32004)
oldversion = True
log("Version available %s" %versionlist_rc[0])
# exclude if installed RC on checking for newer beta
if not oldversion and versionlist_beta and version_installed['tag'] not in ["releasecandidate"]:
if version_installed['major'] <= int(versionlist_beta[0]['major']):
if version_installed['revision'] < versionlist_beta[0]['revision']:
msg = __localize__(32005)
oldversion = True
log("Version available %s" %versionlist_beta[0])
# exclude if installed RC or beta on checking for newer alpha
if not oldversion and versionlist_alpha and version_installed['tag'] not in ["releasecandidate", "beta"]:
if version_installed['major'] <= int(versionlist_alpha[0]['major']):
if version_installed['revision'] < versionlist_alpha[0]['revision']:
msg = __localize__(32006)
oldversion = True
log("Version available %s" %versionlist_alpha[0])
# exclude if installed RC, beta or alpha on checking for newer prealpha
if not oldversion and versionlist_prealpha and version_installed['tag'] not in ["releasecandidate", "beta", "alpha"]:
if version_installed['major'] <= int(versionlist_prealpha[0]['major']):
if version_installed['revision'] < versionlist_prealpha[0]['revision']:
msg = __localize__(32007)
oldversion = True
log("Version available %s" %versionlist_prealpha[0])
# Nothing to see here, move along
else:
# Nothing to see here, move along
pass
return oldversion, msg
def _versionchecklinux(package):
if (platform.dist()[0] == "Ubuntu" or platform.dist()[0] == "Debian"):
oldversion, msg = _versioncheckapt(package)
else:
log("Unsupported platform %s" %platform.dist()[0])
sys.exit(0)
return oldversion, msg
def _versioncheckapt(package):
#check for linux using Apt
# initial vars
oldversion = False
msg = ''
result = ''
# try to import apt
try:
import apt
from aptdaemon import client
from aptdaemon import errors
except:
log('python apt import error')
sys.exit(0)
apt_client = client.AptClient()
try:
result = apt_client.update_cache(wait=True)
if (result == "exit-success"):
log("Finished updating the cache")
else:
log("Error updating the cache %s" %result)
except errors.NotAuthorizedError:
log("You are not allowed to update the cache")
sys.exit(0)
trans = apt_client.upgrade_packages([package])
trans.simulate(reply_handler=_apttransstarted, error_handler=_apterrorhandler)
pkg = trans.packages[4][0]
if (pkg == package):
cache=apt.Cache()
cache.open(None)
cache.upgrade()
if (cache[package].installed and cache[package].installed.version != cache[package].candidate.version):
log("Version installed %s" %cache[package].installed.version)
log("Version available %s" %cache[package].candidate.version)
oldversion = True
msg = __localize__(32011)
elif (cache[package].installed):
log("Already on newest version %s" %cache[package].installed.version)
else:
log("No installed package found, probably manual install")
sys.exit(0)
return oldversion, msg
def _apttransstarted():
pass
def _apterrorhandler(error):
raise error
def _upgrademessage(msg):
# Don't show while watching a video
while(xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
xbmc.sleep(1000)
i = 0
while(i < 5 and not xbmc.abortRequested):
xbmc.sleep(1000)
i += 1
# Detect if it's first run and only show OK dialog + ask to disable on that
firstrun = __addon__.getSetting("versioncheck_firstrun") != 'false'
if firstrun and not xbmc.abortRequested:
xbmcgui.Dialog().ok(__addonname__,
msg,
__localize__(32001),
__localize__(32002))
# sets check to false which is checked on startup
if xbmcgui.Dialog().yesno(__addonname__,
__localize__(32009),
__localize__(32010)):
__addon__.setSetting("versioncheck_enable", 'false')
# set first run to false to only show a popup next startup / every two days
__addon__.setSetting("versioncheck_firstrun", 'false')
# Show notification after firstrun
elif not xbmc.abortRequested:
log(__localize__(32001) + '' + __localize__(32002))
xbmc.executebuiltin("XBMC.Notification(%s, %s, %d, %s)" %(__addonname__,
__localize__(32001) + '' + __localize__(32002),
15000,
__icon__))
else:
pass
if (__name__ == "__main__"):
log('Version %s started' % __addonversion__)
Main()
``` |
{
"source": "jingchaocheng/running_page",
"score": 3
} |
#### File: scripts/gpxtrackposter/github_drawer.py
```python
import calendar
import datetime
import locale
import svgwrite
from .utils import format_float
from .exceptions import PosterError
from .poster import Poster
from .tracks_drawer import TracksDrawer
from .xy import XY
class GithubDrawer(TracksDrawer):
"""Draw a gtihub profile-like poster"""
def __init__(self, the_poster: Poster):
super().__init__(the_poster)
def draw(self, dr: svgwrite.Drawing, size: XY, offset: XY):
if self.poster.tracks is None:
raise PosterError("No tracks to draw")
year_size = 200 * 4.0 / 80.0
year_style = f"font-size:{year_size}px; font-family:Arial;"
year_length_style = f"font-size:{110 * 3.0 / 80.0}px; font-family:Arial;"
month_names_style = f"font-size:2.5px; font-family:Arial"
total_length_year_dict = self.poster.total_length_year_dict
for year in range(self.poster.years.from_year, self.poster.years.to_year + 1)[
::-1
]:
start_date_weekday, _ = calendar.monthrange(year, 1)
github_rect_first_day = datetime.date(year, 1, 1)
            # The GitHub-style grid starts on the Monday on or before January 1st,
            # i.e. the last Monday of the previous year unless January 1st itself is a Monday.
github_rect_day = github_rect_first_day + datetime.timedelta(
-start_date_weekday
)
year_length = total_length_year_dict.get(year, 0)
year_length = format_float(self.poster.m2u(year_length))
try:
month_names = [
locale.nl_langinfo(day)[:3] # Get only first three letters
for day in [
locale.MON_1,
locale.MON_2,
locale.MON_3,
locale.MON_4,
locale.MON_5,
locale.MON_6,
locale.MON_7,
locale.MON_8,
locale.MON_9,
locale.MON_10,
locale.MON_11,
locale.MON_12,
]
]
            # fall back to hard-coded month names on Windows or other systems without these locale names
except Exception as e:
print(str(e))
month_names = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
km_or_mi = "mi"
if self.poster.units == "metric":
km_or_mi = "km"
dr.add(
dr.text(
f"{year}",
insert=offset.tuple(),
fill=self.poster.colors["text"],
alignment_baseline="hanging",
style=year_style,
)
)
dr.add(
dr.text(
f"{year_length} {km_or_mi}",
insert=(offset.tuple()[0] + 165, offset.tuple()[1] + 2),
fill=self.poster.colors["text"],
alignment_baseline="hanging",
style=year_length_style,
)
)
            # add the month names to the poster one by one, because SVG text auto-trims spaces.
for num, name in enumerate(month_names):
dr.add(
dr.text(
f"{name}",
insert=(offset.tuple()[0] + 15.5 * num, offset.tuple()[1] + 14),
fill=self.poster.colors["text"],
style=month_names_style,
)
)
rect_x = 10.0
dom = (2.6, 2.6)
            # add every day of this year, laid out as up to 54 week columns of 7 days each
for i in range(54):
rect_y = offset.y + year_size + 2
for j in range(7):
if int(github_rect_day.year) > year:
break
rect_y += 3.5
color = "#444444"
date_title = str(github_rect_day)
if date_title in self.poster.tracks_by_date:
tracks = self.poster.tracks_by_date[date_title]
length = sum([t.length for t in tracks])
distance1 = self.poster.special_distance["special_distance"]
distance2 = self.poster.special_distance["special_distance2"]
has_special = distance1 < length / 1000 < distance2
color = self.color(
self.poster.length_range_by_date, length, has_special
)
if length / 1000 >= distance2:
color = self.poster.colors.get(
"special2"
) or self.poster.colors.get("special")
str_length = format_float(self.poster.m2u(length))
date_title = f"{date_title} {str_length} {km_or_mi}"
rect = dr.rect((rect_x, rect_y), dom, fill=color)
rect.set_desc(title=date_title)
dr.add(rect)
github_rect_day += datetime.timedelta(1)
rect_x += 3.5
offset.y += 3.5 * 9 + year_size + 1.5
``` |
{
"source": "jingchengdeng/Lens_Smear_Detection",
"score": 3
} |
#### File: Lens_Smear_Detection/hw2/main.py
```python
import csv
# import numpy as np
import math
from geopy.distance import vincenty
def read(file):
f = open(file)
data = csv.reader(f)
return data
def get_latitude(fileName):
data = read(fileName)
latitudes = []
for row in data:
latitude = row[3]
latitudes.append(float(latitude))
return latitudes
def getShapepoint(data, index):
result = []
for i in range(len(data)):
rawData = data[i][-3].split('|')[index].split('/')
result.append([float(rawData[0]), float(rawData[1])])
return result
def getXY(data):
result = []
for i in range(len(data)):
row = []
row.append(float(data[i][3]))
row.append(float(data[i][4]))
result.append(row)
return result
def matchPoint(probe, link, probePoints, linkData):
id = []
point = []
for i in range(len(probe)):
if i > 0 and probePoints[i][0] == probePoints[i-1][0]:
id.append(id[-1])
point.append(point[-1])
continue
d = []
for j in range(len(link)):
d.append(vincenty((probe[i][0], probe[i][1]), (link[j][0], link[j][1])).meters)
point.append(d.index(min(d)))
id.append(int(linkData[point[-1]][0]))
return id, point
def getDistance(beginPoint, endPoint):
point = []
for i in range(len(beginPoint)):
d = []
for j in range(len(endPoint)):
d.append(vincenty((beginPoint[i][0],beginPoint[i][1]), (endPoint[j][0], endPoint[j][1])).meters)
point.append(d.index(min(d)))
return point
def getDFR(probe, refNode, index):
slope = []
for i in range(len(probe)):
p2 = refNode[index[i]]
slope.append(vincenty((probe[i][0], probe[i][1]), (p2[0], p2[1])).meters)
return slope
def getPerDis(p1, p2):
pass
def getSlope(p1, probePoints):
slope = []
for i in range(len(p1)-1):
slope.append((int(probePoints[i+1][-3])-int(probePoints[i][-3]))/vincenty((p1[i][0], p1[i][1]), (p1[i+1][0], p1[i+1][1])).meters)
return slope
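# Note (added): getSlope approximates the road slope between consecutive probe points as the
# altitude difference (column -3 of the raw probe row) divided by the Vincenty ground distance
# between the two points.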
if __name__ == "__main__":
probePoints = [line.split(',') for line in open('./Partition6467ProbePoints.csv')]
linkData = [line.split(',') for line in open('./Partition6467LinkData.csv')]
probeXY = getXY(probePoints)
refNode = getShapepoint(linkData, 0)
shapePoint = getShapepoint(linkData, -1)
match, index = matchPoint(probeXY[0:1000], refNode, probePoints, linkData)
dFR = getDFR(probeXY[0:1000], refNode, index)
dir = []
for i in range(len(dFR)-1):
if dFR[i+1] > dFR[i]:
dir.append('T')
else:
dir.append('F')
dir.append(dir[-1])
dFL = []
for i in range(1000):
h = linkData[i][-3].split('|')[0].split('/')[-1]
if h == '':
dFL.append(0)
else:
dFL.append(math.sqrt(dFR[i]**2+float(h)**2))
slope = getSlope(probeXY[0:1000], probePoints)
csvFile = file('Partition6467MatchedPoints.csv','wb')
writer = csv.writer(csvFile)
writer.writerow(['sampleID', 'dataTime', 'sourceCode', 'latitude', 'longitude', 'altitude', 'speed', 'heading', 'linkPVID', 'direction', 'distFromRef', 'distFromLink'])
data = probePoints
for i in range(1000):
data[i][-1] = data[i][-1][0:-2]
data[i].append(str(match[i]))
data[i].append(dir[i])
data[i].append(str(dFR[i]))
data[i].append(str(dFL[i]))
writer.writerow(data[i])
csvFile.close()
``` |
{
"source": "jingchengdeng/Probe-Data-Analysis-for-Road-Slope",
"score": 3
} |
#### File: jingchengdeng/Probe-Data-Analysis-for-Road-Slope/match.py
```python
import csv
import operator
from math import sin, cos, sqrt, atan2, radians
def calDistance(lat1, lon1, lat2, lon2):
R = 6373.0
lat1 = radians(lat1)
lon1 = radians(lon1)
lat2 = radians(lat2)
lon2 = radians(lon2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
return distance
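# Worked example (added): calDistance(0.0, 0.0, 0.0, 1.0) ~= 111.2, i.e. one degree of
# longitude at the equator is roughly 111 km with the Earth radius R = 6373 km used above.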
def getNode(data):
points = data[14].split('|')
ref = points[0].split('/')
nref = points[-1].split('/')
reflat = ref[0]
reflon = ref[1]
nreflat = nref[0]
nreflon = nref[1]
return reflat, reflon, nreflat, nreflon
def projection(probelat, probelong, reflat, reflon, nreflat, nreflon):
x = float(probelat)
y = float(probelong)
x1, y1 = float(reflat), float(reflon)
x2, y2 = float(nreflat), float(nreflon)
tmp = (x2 - x1) * (x - x1) + (y2 - y1) * (y - y1)
if (tmp <= 0):
return calDistance(x, y, x1, y1)
d2 = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1)
if (tmp >= d2):
return calDistance(x, y, x2, y2)
r = tmp / d2
px = x1 + (x2 - x1) * r
py = y1 + (y2 - y1) * r
return calDistance(x, y, px, py)
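# Note (added): projection() returns the distance (km, via calDistance) from the probe point
# to its nearest point on the ref -> non-ref node segment; the projection ratio r is computed
# by treating latitude/longitude as planar coordinates.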
def getDirection(prob1, prob2, refpoint):
dis1 = calDistance(float(prob1[0]), float(prob1[1]), float(refpoint[0]), float(refpoint[1]))
dis2 = calDistance(float(prob2[0]), float(prob2[1]), float(refpoint[0]), float(refpoint[1]))
if dis1-dis2 < 0:
return True
else:
return False
if __name__ == "__main__":
probeData = [line.split(',') for line in open('./Partition6467ProbePoints.csv')]
linkData = [line.split(',') for line in open('./Partition6467LinkData.csv')]
routeData = []
probeData = probeData[:10000]
count =0
# for i in range(len(probeData)):
# probeData[i].append(i)
for k in range(len(linkData)):
reflat, reflon, nreflat, nreflon = getNode(linkData[k])
routeData.append((linkData[k][0], reflat, reflon, nreflat, nreflon))
for i in range(len(probeData)):
candidate = []
for k in range(len(routeData)):
lat = probeData[i][3]
lon = probeData[i][4]
distance = projection(lat, lon, routeData[k][1], routeData[k][2],
routeData[k][3], routeData[k][4])
candidate.append((routeData[k][0], distance, routeData[k][1], routeData[k][2]))
print("progress: ", i ," /",len(probeData))
matchMap = min(candidate, key=operator.itemgetter(1))
with open('Partition6467MatchedPoints.csv', 'a', newline='') as csvfile:
output = csv.writer(csvfile)
sampleID = probeData[i][0]
dateTime = probeData[i][1]
sourceCode = probeData[i][2]
latitude = probeData[i][3]
lontitude = probeData[i][4]
altitude = probeData[i][5]
speed = probeData[i][6]
heading = probeData[i][7].rstrip()
linkPVID = matchMap[0]
if getDirection((probeData[i][3],probeData[i][4]),(probeData[i-1][3],probeData[i-1][4]),
(matchMap[2],matchMap[3])):
direction = 'T'
else:
direction = 'F'
distFromRef = calDistance(float(probeData[i][3]),
float(probeData[i][4]),
float(matchMap[2]),
float(matchMap[3]))
distFromLink = matchMap[1]
output.writerow((sampleID, dateTime, sourceCode, latitude, lontitude, altitude, speed, heading, linkPVID, direction, distFromRef, distFromLink))
print("Match Point Finished!")
``` |
{
"source": "JingChuanHe/PythonTools",
"score": 3
} |
#### File: JingChuanHe/PythonTools/Data.py
```python
from tkinter import *
import xlrd
import xlwt
import os
class excelT:
def __init__(self):
window = Tk(className = "处理数据")
window.geometry('190x90+150+150')
lable = Label(window,text = "Filename")
self.msg = StringVar()
self.msg1 = StringVar()
entryName = Entry(window,textvariable= self.msg)
entryName1 = Entry(window, textvariable=self.msg1)
readButton = Button(window, text="Read", fg="red", command=self.read_excel)
writeButton = Button(window, text="Write", fg="red", command=self.save_excel)
lable.grid(row=1, column=1)
entryName.grid(row=2, column=1)
entryName1.grid(row = 3,column=1)
readButton.grid(row=2, column=2)
writeButton.grid(row = 3,column = 2)
window.mainloop()
def clickMe(self):
print("Clicke Me ",self.msg.get())
self.read_excel()
def read_excel(self):
print("Come", self.msg.get())
excelName = self.msg.get() + ".xlsx"
        # open the workbook file
file_Name = 'F:\data' + '\\' + excelName
workbook = xlrd.open_workbook(file_Name)
sheet2 = workbook.sheet_by_index(0)
        cols = sheet2.col_values(0)  # get the contents of the first column
print("/****************Wright Excel*******************/")
m = 0
self.listMax = []
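        # The loop below (comment added) keeps one row per whole second: starting at row 21,
        # the first row whose time value in column 0 reaches the next integer m is appended.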
for i in range(21, len(cols)):
if int(cols[i]) == m:
m = m + 1
list0 = sheet2.row_values(i)
self.listMax.append(list0)
print(self.listMax)
print("/****************INT*******************/")
def save_excel(self):
print(os.getcwd())
os.chdir('f:\\data\\NewData')
print(os.getcwd())
print("Come",self.msg1.get(),self.listMax)
excelName = self.msg1.get() + ".xls"
f = xlwt.Workbook()
sheet2 = f.add_sheet('sheet1', cell_overwrite_ok=True)
row0 = ['时间s', '力kN', '变形mm', '位移mm', '扩展', '应力MPa', '应变%']
for i in range(0, len(row0)):
sheet2.write(0, i, row0[i])
for j in range(0, len(self.listMax)):
data1 = self.listMax[j]
for i in range(0, len(row0)):
sheet2.write(j + 1, i, data1[i])
        f.save(excelName)  # save the workbook
excelT()
```
#### File: JingChuanHe/PythonTools/excelT.py
```python
import xlrd
import xlwt
def read_excel():
    # open the workbook file
workbook = xlrd.open_workbook(r'F:\data\00.xlsx')
sheet2 = workbook.sheet_by_index(0)
    cols = sheet2.col_values(0)  # get the contents of the first column
print("/****************Wright Excel*******************/")
m = 0
listMax = []
for i in range(21,len(cols)):
if int (cols[i]) == m:
m = m + 1
list0 = sheet2.row_values(i)
listMax.append(list0)
print(listMax)
print("/****************INT*******************/")
save_excel(listMax)
def save_excel(data0 ):
f = xlwt.Workbook()
sheet2 = f.add_sheet('sheet1', cell_overwrite_ok=True)
row0 = ['时间s', '力kN', '变形mm', '位移mm', '扩展','应力MPa','应变%']
for i in range(0, len(row0)):
sheet2.write(0, i, row0[i])
for j in range(0,len(data0)):
data1 = data0[j]
for i in range(0, len(row0)):
sheet2.write(j+1, i, data1[i])
    f.save('demo13.xls')  # save the workbook
if __name__ == '__main__':
read_excel()
```
#### File: JingChuanHe/PythonTools/imageFile.py
```python
from matplotlib.pyplot import imread  # assumed import; the original file calls imread without importing it
def imageF():
image = imread(r"E:\Python Working\png")
if __name__ == "__main__":
imageF()
```
#### File: PythonTools/MessageInfoSys/InfoNet.py
```python
from MessageInfoSys.mysqlTools import mysqlTools
from PIL import Image
class infoNet():
def __init__(self):
self.sqlTools = mysqlTools('TESTDB')
# with open('E:/PythonWorking/befor523/AutomateTheBoringStuffWithPython/mv1/122600095003.JPG','rb') as imageNet:
# imageStrom = imageNet.read()
# print(type(imageStrom))
# print(imageNet.read())
# with open('E:/PythonWorking/test/image.JPG', 'wb') as file:
# byteCount = file.write(imageStrom)
# print(byteCount)
# # self.sqlTools.cursor.fetcsaveImage(imageStrom)
# self.sqlTools.updateImage(imageStrom,9)
# # self.sqlTools.del_Info(11)
self.sqlTools.getImage()
infoNet()
```
#### File: JingChuanHe/PythonTools/pandasLearn.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from numpyLearn import *
import testFile
'''
Python data analysis library; includes the DataFrame structure.
'''
class pandasLearn:
def __init__(self):
testFile
print("eee")
pandasLearn()
``` |
{
"source": "JingChunzhen/Paddle",
"score": 2
} |
#### File: fleet/utils/ps_util.py
```python
import numpy as np
class Distributed:
@staticmethod
def estimate(main_program, varname2tables):
def distributed_ops_pass(program):
SPARSE_OP_TYPE_DICT = {"lookup_table": "W", "lookup_table_v2": "W"}
def _get_pull_sparse_ops(_program):
pull_sparse_ops = {}
for op in _program.global_block().ops:
if op.type in SPARSE_OP_TYPE_DICT.keys() \
and op.attr('remote_prefetch') is True:
param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0]
ops = pull_sparse_ops.get(param_name, [])
ops.append(op)
pull_sparse_ops[param_name] = ops
return pull_sparse_ops
def _pull_sparse_fuse(_program, pull_sparse_ops):
for param, ops in pull_sparse_ops.items():
all_ops = program.global_block().ops
op_idxs = [all_ops.index(op) for op in ops]
inputs = [
program.global_block().vars[op.input("Ids")[0]]
for op in ops
]
w = program.global_block().vars[ops[0].input("W")[0]]
if w.name not in varname2tables.keys():
raise ValueError(
"can not find variable {}, please check your configuration".
format(w.name))
table_id = varname2tables[w.name]
padding_idx = ops[0].attr("padding_idx")
is_distributed = ops[0].attr("is_distributed")
op_type = ops[0].type
outputs = [
program.global_block().vars[op.output("Out")[0]]
for op in ops
]
for idx in op_idxs[::-1]:
program.global_block()._remove_op(idx)
inputs_idxs = [-1] * len(inputs)
outputs_idxs = [-1] * len(outputs)
for idx, op in enumerate(program.global_block().ops):
for i in range(0, len(op.output_names)):
outs = op.output(op.output_names[i])
for in_id, in_var in enumerate(inputs):
if in_var.name in outs:
inputs_idxs[in_id] = idx
for i in range(0, len(op.input_names)):
ins = op.input(op.input_names[i])
for out_id, out_var in enumerate(outputs):
if out_var.name in ins:
outputs_idxs[out_id] = idx
if min(outputs_idxs) - max(inputs_idxs) >= 1:
distributed_idx = max(inputs_idxs) + 1
program.global_block()._insert_op(
index=distributed_idx,
type="distributed_lookup_table",
inputs={"Ids": inputs,
'W': w},
outputs={"Outputs": outputs},
attrs={
"is_distributed": is_distributed,
"padding_idx": padding_idx,
"table_id": table_id,
"lookup_table_version": op_type
})
else:
raise ValueError(
"something wrong with Fleet, submit a issue is recommended"
)
pull_sparse_ops = _get_pull_sparse_ops(program)
_pull_sparse_fuse(program, pull_sparse_ops)
return program
covert_program = distributed_ops_pass(main_program)
return covert_program
``` |
{
"source": "Jingciii/parfit",
"score": 3
} |
#### File: parfit/parfit/fit.py
```python
from joblib import Parallel, delayed
from sklearn.base import BaseEstimator
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
__all__ = ["fitModels"]
def fitOne(model, X, y, params):
"""
Makes one model fit using provided data and parameters
:param model: The instantiated model you wish to pass, e.g. LogisticRegression()
:param X: The independent variable data
:param y: The response variable data
:param params: The parameters passed through to the model from the parameter grid
:return: Returns the fitted model
"""
if isinstance(model, BaseEstimator):
model.set_params(**params)
else:
model = model(**params)
return model.fit(X, y)
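# Minimal usage sketch for fitOne on its own (hypothetical data and names, not part of the library):
#   from sklearn.linear_model import LogisticRegression
#   model = fitOne(LogisticRegression(), X_train, y_train, {'C': 1e-3, 'penalty': 'l2'})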
def fitModels(model, paramGrid, X, y, n_jobs=-1, verbose=10):
"""
Parallelizes fitting all models using all combinations of parameters in paramGrid on provided data.
:param model: The instantiated model you wish to pass, e.g. LogisticRegression()
:param paramGrid: The ParameterGrid object created from sklearn.model_selection
:param X: The independent variable data
:param y: The response variable data
:param n_jobs: Number of cores to use in parallelization (defaults to -1: all cores)
:param verbose: The level of verbosity of reporting updates on parallel process
Default is 10 (send an update at the completion of each job)
:return: Returns a list of fitted models
Example usage:
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ParameterGrid
model = LogisticRegression()
grid = {
'C': [1e-4, 1e-3], # regularization
'penalty': ['l1','l2'], # penalty type
'n_jobs': [-1] # parallelize within each fit over all cores
}
paramGrid = ParameterGrid(grid)
myModels = fitModels(model, paramGrid, X_train, y_train)
"""
return Parallel(n_jobs=n_jobs, verbose=verbose)(delayed(fitOne)(model, X, y, params) for params in paramGrid)
``` |
{
"source": "JingdiC/SARNet",
"score": 2
} |
#### File: sarnet_td3/common/gpu_multithread.py
```python
import threading, queue, time, os, pickle
# from queue import Queue
import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
from tensorflow.python.keras.backend import set_session
lock = threading.Lock()
class MultiTrainTD3(threading.Thread):
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_end_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]] for i in range(self.num_agents)] for _ in range(self.num_env)]
# self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_end_rewards = []
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
self.save_n_ep = self.num_env * 10
self.print_step = -int(self.save_n_ep / self.num_env)
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
if action is "None": # If you send `None`, the thread will exit.
return
elif action is "get_action":
out = self.get_action(data, p_index)
self.output_queue.put(out)
elif action is "get_qdebug":
out = self.get_qdebug(data, p_index)
self.output_queue.put(out)
elif action is "get_loss":
out = self.get_loss(data, p_index)
self.output_queue.put(out)
elif action is "write_tboard":
self.write_tboard(data)
elif action is "add_to_buffer":
self.buffer_op.collect_exp(data)
elif action is "save_rew_info":
self.save_rew_info(data)
elif action is "save_benchmark":
out = self.save_benchmark(data)
self.output_queue.put(out)
elif action is "reset_rew_info":
self.reset_rew_info()
elif action is "save_model_rew":
if not (self.args.benchmark or self.args.display):
self.save_model(data)
self.plot_rewards(data)
except queue.Empty:
continue
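    # Hedged usage sketch (producer side; variable names are assumptions, not from this file):
    #   in_q, out_q = queue.Queue(), queue.Queue()
    #   worker = MultiTrainTD3(in_q, out_q, args=(trainers, arglist, buffer_op, num_env, sess, n_agents, n_adv))
    #   worker.start()
    #   in_q.put(("get_action", p_index, (obs_n, h_n, c_n, mem_n, q1_h_n, True)))
    #   act, h1, c1, mem1, attn = out_q.get()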
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t[p_index], is_train)
# print(np.shape(obs_n_t))
act_j_t, state_j_t1, mem_j_t1, attn_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM" or self.args.encoder_model != "DDPG":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t
def get_qdebug(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
obs_n_t, action_n_t, q1_h_n_t, q2_h_n_t = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
q1_j_input = agent.prep_q_input(obs_n_t, action_n_t, q1_h_n_t[p_index])
_, q1_h_j_t1 = agent.q1_debug['q_values'](*(q1_j_input))
if self.args.td3:
q2_input = agent.prep_q_input(obs_n_t, action_n_t, q2_h_n_t[p_index])
_, q2_h_j_t1 = agent.q2_debug['q_values'](*(q2_input))
else:
q2_h_j_t1 = []
return q1_h_j_t1, q2_h_j_t1
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
train_step = data
loss = agent.update(self.trainers, self.buffer_op, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, ep_step = data
# rew_n (num_env, num_agents)
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
if ep_step >= self.args.max_episode_len - 10: # Compute only last 10 episode step rewards
self.ep_end_rewards[j][-1] += rew
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[j][i][-1].append(info)
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
self.ep_end_rewards[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
for i in range(self.num_agents):
self.agent_info[j][i].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
# pickle_info = [self.agent_info[j] for j in range(self.num_env)]
with open(file_name, 'wb') as fp:
            # Dump files as [num_env, [#agents, [#ep, [#steps, [dim]]]]]
pickle.dump(self.agent_info, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
if num_episodes % (self.save_n_ep) == 0:
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
# episode_rewards, agent_rewards, final_ep_rewards, final_ep_ag_rewards = rewards
if self.args.env_type == "mpe":
# print statement depends on whether or not there are adversaries
if self.num_adversaries == 0:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards) / 10.
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)))
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)) + "\n")
else:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards)
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
# Keep track of final episode reward
self.final_ep_rewards.append(episode_b_rewards)
self.final_ep_end_rewards.append(ep_end_b_rewards)
for rew in ep_ag_b_rewards:
self.final_ep_ag_rewards.append(rew)
self.time_prev = time.time()
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
rew_ep_end_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards_ep_end.pkl'
with open(rew_ep_end_file_name, 'wb') as fp:
pickle.dump(self.final_ep_end_rewards, fp)
agrew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_agrewards.pkl'
with open(agrew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_ag_rewards, fp)
"""
REINFORCE Threads
"""
class MultiTrainVPG(threading.Thread):
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
if self.args.env_type == "mpe":
self.print_step = -int(self.save_rate / self.num_env)
else: # print for episode end only (success rate)
self.print_step = -int(self.save_rate / (self.num_env * self.args.max_episode_len))
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
if action == "None": # If you send "None", the thread will exit.
return
elif action == "get_action":
out = self.get_action(data, p_index)
self.output_queue.put(out)
elif action == "get_loss":
out = self.get_loss(data, p_index)
self.output_queue.put(out)
elif action == "write_tboard":
self.write_tboard(data)
elif action == "add_to_buffer":
self.buffer_op.collect_exp(data)
elif action == "add_to_buffer_reinforce":
self.buffer_op.collect_exp(data)
elif action == "save_rew_info":
self.save_rew_info(data)
elif action == "save_benchmark":
out = self.save_benchmark(data)
self.output_queue.put(out)
elif action == "reset_rew_info":
self.reset_rew_info()
elif action == "save_model_rew":
if not (self.args.benchmark or self.args.display):
self.save_model(data)
self.plot_rewards(data)
except queue.Empty:
continue
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2)
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, is_train)
act_j_t, act_soft_j_t, state_j_t1, mem_j_t1, attn_j_t, value_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, act_soft_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t, value_j_t
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
train_step, buffer_data = data
agent = self.trainers[p_index]
loss = agent.update(self.trainers, buffer_data, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, terminal = data
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[-1][i].append(info_n[0]['n'])
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
self.agent_info[j].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(file_name, 'wb') as fp:
pickle.dump(self.ep_success, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
episode_b_success = []
for j in range(self.num_env):
episode_b_success.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_success = np.mean(np.array(episode_b_success)) / self.args.max_episode_len
print("steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
self.final_ep_rewards.append(episode_b_success)
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
def get_gputhreads(trainers, args, buffer_op, num_env, num_agents, num_adv):
threads = []
sess = tf.compat.v1.get_default_session()
for t in range(args.num_gpu_threads):
input_q = queue.Queue()
output_q = queue.Queue()
if args.policy_grad == "maddpg":
threads.append(MultiTrainTD3(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
elif args.policy_grad == "reinforce":
threads.append(
MultiTrainVPG(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
threads[t].start()
time.sleep(1)
return threads
def close_gputhreads(threads):
for t in threads:
t.input_queue.put(("None", None, None))
for t in threads:
t.join()
print('GPU trainers cancelled')
return
``` |
{
"source": "jingdongHe/license_plate_recognition",
"score": 3
} |
#### File: jingdongHe/license_plate_recognition/license_plate_recognition.py
```python
import time
import multiprocessing as mp
from hyperlpr import *
import cv2
faceCascade = cv2.CascadeClassifier('./myhaar.xml')
pointTime = 0
def image_put(q, user, pwd, ip, channel=1):
cap = cv2.VideoCapture("rtsp://%s:%s@%s//Streaming/Channels/%d" % (user, pwd, ip, channel))
while True:
q.put(cap.read()[1])
q.get() if q.qsize() > 1 else time.sleep(0.01)
def image_get(q, window_name):
cv2.namedWindow(window_name, flags=cv2.WINDOW_FREERATIO)
while True:
frame = q.get()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow(window_name, gray)
faces = faceCascade.detectMultiScale(
frame,
scaleFactor=1.2,
minNeighbors=5,
minSize=(60, 60)
)
for (x,y,w,h) in faces:
print(faces)
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),1)
LPR(time.time(),frame)
time.sleep(0.1)
cv2.waitKey(1)
def LPR(requestTime,frame):
global pointTime
# print(requestTime,pointTime,requestTime - pointTime)
if(requestTime - pointTime >1.5):
result = HyperLPR_plate_recognition(frame)
im_url = r'E:/workspace/LPR/IMG/check/%s.jpg'%time.strftime("%H-%M-%S", time.localtime())
print(im_url)
cv2.imwrite(im_url,frame)
if(len(result)>0):
print(result)
im_url = r'E:/workspace/LPR/IMG/success/R-%s.jpg'%(result[0][0]+"-"+str(result[0][1])[:5])
print("success==",im_url)
cv2.imwrite(im_url,frame)
pointTime = requestTime
def run_single_camera():
user_name, user_pwd, camera_ip = "admin", "admin123456", "192.168.1.1:554"
mp.set_start_method(method='spawn') # init
queue = mp.Queue(maxsize=2)
processes = [mp.Process(target=image_put, args=(queue, user_name, user_pwd, camera_ip)),
mp.Process(target=image_get, args=(queue, camera_ip))]
[process.start() for process in processes]
[process.join() for process in processes]
if __name__ == '__main__':
run_single_camera()
``` |
{
"source": "jinge90/llvm",
"score": 2
} |
#### File: functionalities/scripted_process/TestScriptedProcess.py
```python
import os, json, tempfile
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
from lldbsuite.test import lldbtest
class ScriptedProcessTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
def tearDown(self):
TestBase.tearDown(self)
def test_python_plugin_package(self):
"""Test that the lldb python module has a `plugins.scripted_process`
package."""
self.expect('script import lldb.plugins',
substrs=["ModuleNotFoundError"], matching=False)
self.expect('script dir(lldb.plugins)',
substrs=["scripted_process"])
self.expect('script import lldb.plugins.scripted_process',
substrs=["ModuleNotFoundError"], matching=False)
self.expect('script dir(lldb.plugins.scripted_process)',
substrs=["ScriptedProcess"])
self.expect('script from lldb.plugins.scripted_process import ScriptedProcess',
substrs=["ImportError"], matching=False)
self.expect('script dir(ScriptedProcess)',
substrs=["launch"])
def test_invalid_scripted_register_context(self):
"""Test that we can launch an lldb scripted process with an invalid
Scripted Thread, with invalid register context."""
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
log_file = self.getBuildArtifact('thread.log')
self.runCmd("log enable lldb thread -f " + log_file)
self.assertTrue(os.path.isfile(log_file))
os.environ['SKIP_SCRIPTED_PROCESS_LAUNCH'] = '1'
def cleanup():
del os.environ["SKIP_SCRIPTED_PROCESS_LAUNCH"]
self.addTearDownHook(cleanup)
scripted_process_example_relpath = 'invalid_scripted_process.py'
self.runCmd("command script import " + os.path.join(self.getSourceDir(),
scripted_process_example_relpath))
launch_info = lldb.SBLaunchInfo(None)
launch_info.SetProcessPluginName("ScriptedProcess")
launch_info.SetScriptedProcessClassName("invalid_scripted_process.InvalidScriptedProcess")
error = lldb.SBError()
process = target.Launch(launch_info, error)
self.assertTrue(error.Success(), error.GetCString())
self.assertTrue(process, PROCESS_IS_VALID)
self.assertEqual(process.GetProcessID(), 666)
self.assertEqual(process.GetNumThreads(), 0)
with open(log_file, 'r') as f:
log = f.read()
self.assertIn("Failed to get scripted thread registers data.", log)
@skipIf(archs=no_match(['x86_64']))
def test_scripted_process_and_scripted_thread(self):
"""Test that we can launch an lldb scripted process using the SBAPI,
check its process ID, read string from memory, check scripted thread
id, name stop reason and register context.
"""
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
os.environ['SKIP_SCRIPTED_PROCESS_LAUNCH'] = '1'
def cleanup():
del os.environ["SKIP_SCRIPTED_PROCESS_LAUNCH"]
self.addTearDownHook(cleanup)
scripted_process_example_relpath = 'dummy_scripted_process.py'
self.runCmd("command script import " + os.path.join(self.getSourceDir(),
scripted_process_example_relpath))
launch_info = lldb.SBLaunchInfo(None)
launch_info.SetProcessPluginName("ScriptedProcess")
launch_info.SetScriptedProcessClassName("dummy_scripted_process.DummyScriptedProcess")
error = lldb.SBError()
process = target.Launch(launch_info, error)
self.assertTrue(process and process.IsValid(), PROCESS_IS_VALID)
self.assertEqual(process.GetProcessID(), 42)
self.assertEqual(process.GetNumThreads(), 1)
thread = process.GetSelectedThread()
self.assertTrue(thread, "Invalid thread.")
self.assertEqual(thread.GetThreadID(), 0x19)
self.assertEqual(thread.GetName(), "DummyScriptedThread.thread-1")
self.assertEqual(thread.GetStopReason(), lldb.eStopReasonSignal)
self.assertGreater(thread.GetNumFrames(), 0)
frame = thread.GetFrameAtIndex(0)
GPRs = None
register_set = frame.registers # Returns an SBValueList.
for regs in register_set:
if 'general purpose' in regs.name.lower():
GPRs = regs
break
self.assertTrue(GPRs, "Invalid General Purpose Registers Set")
self.assertEqual(GPRs.GetNumChildren(), 21)
for idx, reg in enumerate(GPRs, start=1):
self.assertEqual(idx, int(reg.value, 16))
def create_stack_skinny_corefile(self, file):
self.build()
target, process, thread, _ = lldbutil.run_to_source_breakpoint(self, "// break here", lldb.SBFileSpec("main.c"))
self.assertTrue(process.IsValid(), "Process is invalid.")
# FIXME: Use SBAPI to save the process corefile.
self.runCmd("process save-core -s stack " + file)
self.assertTrue(os.path.exists(file), "No stack-only corefile found.")
self.assertTrue(self.dbg.DeleteTarget(target), "Couldn't delete target")
@skipUnlessDarwin
@skipIfOutOfTreeDebugserver
@skipIfAsan # rdar://85954489
def test_launch_scripted_process_stack_frames(self):
"""Test that we can launch an lldb scripted process from the command
line, check its process ID and read string from memory."""
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
for module in target.modules:
if 'a.out' in module.GetFileSpec().GetFilename():
main_module = module
break
self.assertTrue(main_module, "Invalid main module.")
error = target.SetModuleLoadAddress(main_module, 0)
self.assertTrue(error.Success(), "Reloading main module at offset 0 failed.")
os.environ['SKIP_SCRIPTED_PROCESS_LAUNCH'] = '1'
def cleanup():
del os.environ["SKIP_SCRIPTED_PROCESS_LAUNCH"]
self.addTearDownHook(cleanup)
scripted_process_example_relpath = 'stack_core_scripted_process.py'
self.runCmd("command script import " + os.path.join(self.getSourceDir(),
scripted_process_example_relpath))
corefile_process = None
with tempfile.NamedTemporaryFile() as file:
self.create_stack_skinny_corefile(file.name)
corefile_target = self.dbg.CreateTarget(None)
corefile_process = corefile_target.LoadCore(self.getBuildArtifact(file.name))
self.assertTrue(corefile_process, PROCESS_IS_VALID)
structured_data = lldb.SBStructuredData()
structured_data.SetFromJSON(json.dumps({
"backing_target_idx" : self.dbg.GetIndexOfTarget(corefile_process.GetTarget())
}))
launch_info = lldb.SBLaunchInfo(None)
launch_info.SetProcessPluginName("ScriptedProcess")
launch_info.SetScriptedProcessClassName("stack_core_scripted_process.StackCoreScriptedProcess")
launch_info.SetScriptedProcessDictionary(structured_data)
error = lldb.SBError()
process = target.Launch(launch_info, error)
self.assertTrue(error.Success(), error.GetCString())
self.assertTrue(process, PROCESS_IS_VALID)
self.assertEqual(process.GetProcessID(), 42)
self.assertEqual(process.GetNumThreads(), 1)
thread = process.GetSelectedThread()
self.assertTrue(thread, "Invalid thread.")
self.assertEqual(thread.GetName(), "StackCoreScriptedThread.thread-1")
self.assertEqual(thread.GetNumFrames(), 3)
frame = thread.GetSelectedFrame()
self.assertTrue(frame, "Invalid frame.")
self.assertEqual(frame.GetFunctionName(), "bar")
self.assertEqual(int(frame.FindValue("i", lldb.eValueTypeVariableArgument).GetValue()), 42)
self.assertEqual(int(frame.FindValue("j", lldb.eValueTypeVariableLocal).GetValue()), 42 * 42)
``` |
{
"source": "jingechen/welib",
"score": 2
} |
#### File: examples/onshore_YAMS/000_CPLambdaPitch.py
```python
import numpy as np
import os
from welib import weio # https://github.com/ebranlard/weio
from welib.fast import fastlib as fastlib # latest fastlib is found at https://github.com/ebranlard/welib
def CPLambda():
""" Determine the CP-CT Lambda Pitch matrices of a turbine.
This scrip uses the function CPCT_LambdaPitch which basically does the same as ParametricExample
above.
"""
GE=True
ReRun=False # we don't rerun simulations that were already run
base = '../../_data/NREL5MW'
ref_dir = '../../_data/NREL5MW/' # Folder where the fast input files are located (will be copied)
main_file = '../../_data/NREL5MW/Main_Onshore_OF2.fst' # Main file in ref_dir, used as a template
FAST_EXE = '../../_data/OpenFAST2_x64s_ebra.exe' # Location of a FAST exe (and dll)
# --- Computing CP and CT matrices for range of lambda and pitches
nLambda = 6
nPitch = 8
Lambda = np.linspace(0.10 ,22,nLambda)
Pitch = np.linspace(-5,40,nPitch)
CP,CT,Lambda,Pitch,MaxVal,result = fastlib.CPCT_LambdaPitch(ref_dir,main_file,Lambda,Pitch,fastExe=FAST_EXE,showOutputs=False,nCores=4,TMax=30,reRun=ReRun)
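# Note: CPCT_LambdaPitch runs one OpenFAST simulation per (lambda, pitch) combination (here 6 x 8 = 48 runs,
# distributed over nCores) and returns the CP/CT coefficient matrices over that grid; MaxVal holds the
# optimum found (CP_max, lambda_opt, pitch_opt), which is plotted below.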
print('CP max',MaxVal)
np.savetxt(base+'_Lambda.csv',Lambda,delimiter = ',')
np.savetxt(base+'_Pitch.csv' ,Pitch ,delimiter = ',')
np.savetxt(base+'_CP.csv' ,CP ,delimiter = ',')
np.savetxt(base+'_CT.csv' ,CT ,delimiter = ',')
# --- Plotting matrix of CP values
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca(projection='3d')
LAMBDA, PITCH = np.meshgrid(Lambda, Pitch)
CP[CP<0]=0
surf = ax.plot_surface(LAMBDA, PITCH, np.transpose(CP), cmap=cm.coolwarm, linewidth=0, antialiased=True,alpha=0.8)
ax.scatter(MaxVal['lambda_opt'],MaxVal['pitch_opt'],MaxVal['CP_max'],c='k',marker='o',s=20)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
# def focus_mesh(xmin,xmax,xfocus)
if __name__=='__main__':
CPLambda()
```
#### File: welib/weio/fast_input_file_graph.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Local
try:
from .tools.graph import *
except ImportError:
from welib.FEM.graph import *
# --------------------------------------------------------------------------------}
# --- Wrapper to convert a "fast" input file dictionary into a graph
# --------------------------------------------------------------------------------{
def fastToGraph(data):
if 'BeamProp' in data.keys():
return subdynToGraph(data)
if 'SmplProp' in data.keys():
return hydrodynToGraph(data)
if 'DOF2Nodes' in data.keys():
return subdynSumToGraph(data)
raise NotImplementedError('Graph for object with keys: {}'.format(data.keys()))
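# Example usage (a minimal sketch; the file name is hypothetical and the argument is the dict-like
# object returned by weio, as in the __main__ block at the bottom of this file):
#   graph = fastToGraph(weio.FASTInputFile('SubDyn.dat'))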
# --------------------------------------------------------------------------------}
# --- SubDyn
# --------------------------------------------------------------------------------{
def subdynToGraph(sd):
"""
sd: dict-like object as returned by weio
"""
type2Color=[
(0.1,0.1,0.1), # Watchout based on background
(0.753,0.561,0.05), # 1 Beam
(0.541,0.753,0.05), # 2 Cable
(0.753,0.05,0.204), # 3 Rigid
(0.918,0.702,0.125), # 3 Rigid
]
Graph = GraphModel()
# --- Properties
if 'BeamProp' in sd.keys():
BProps = sd['BeamProp']
Graph.addNodePropertySet('Beam')
for ip,P in enumerate(BProps):
prop= NodeProperty(ID=P[0], E=P[1], G=P[2], rho=P[3], D=P[4], t=P[5] )
Graph.addNodeProperty('Beam',prop)
if 'CableProp' in sd.keys():
CProps = sd['CableProp']
Graph.addNodePropertySet('Cable')
for ip,P in enumerate(CProps):
Chan = -1 if len(P)<5 else P[4]
prop= NodeProperty(ID=P[0], EA=P[1], rho=P[2], T0=P[3], Chan=Chan)
Graph.addNodeProperty('Cable',prop)
if 'RigidProp' in sd.keys():
RProps = sd['RigidProp']
Graph.addNodePropertySet('Rigid')
for ip,P in enumerate(RProps):
prop= NodeProperty(ID=P[0], rho=P[1])
Graph.addNodeProperty('Rigid',prop)
# --- Nodes and DOFs
Nodes = sd['Joints']
for iNode,N in enumerate(Nodes):
Type= 1 if len(N)<=4 else N[4]
node = Node(ID=N[0], x=N[1], y=N[2], z=N[3], Type=Type)
Graph.addNode(node)
# --- Elements
Members = sd['Members'].astype(int)
PropSets = ['Beam','Cable','Rigid']
for ie,E in enumerate(Members):
Type=1 if len(E)==5 else E[5]
#elem= Element(E[0], E[1:3], propset=PropSets[Type-1], propIDs=E[3:5], Type=PropSets[Type-1])
elem= Element(E[0], E[1:3], Type=PropSets[Type-1])
elem.data['object']='cylinder'
elem.data['color'] = type2Color[Type]
Graph.addElement(elem)
# Nodal prop data
Graph.setElementNodalProp(elem, propset=PropSets[Type-1], propIDs=E[3:5])
# Nodal data
for iN,N in enumerate(sd['InterfaceJoints']):
nodeID = int(N[0])
Graph.setNodalData(nodeID,IBC=N[1:])
for iN,N in enumerate(sd['BaseJoints']):
NN=[int(n) if i<7 else n for i,n in enumerate(N)]
nodeID = NN[0]
Graph.setNodalData(nodeID,RBC=NN[1:])
# print('CMass')
# print(sd['ConcentratedMasses'])
return Graph
# --------------------------------------------------------------------------------}
# --- HydroDyn
# --------------------------------------------------------------------------------{
def hydrodynToGraph(hd):
"""
hd: dict-like object as returned by weio
"""
def type2Color(Pot):
if Pot:
return (0.753,0.05,0.204) # Pot flow
else:
return (0.753,0.561,0.05) # Morison
Graph = GraphModel()
# --- Properties
if 'AxCoefs' in hd.keys():
Props = hd['AxCoefs']
Graph.addNodePropertySet('AxCoefs')
for ip,P in enumerate(Props):
prop= NodeProperty(ID=P[0], JointAxCd=P[1], JointAxCa=P[2], JointAxCp=P[3])
Graph.addNodeProperty('AxCoefs',prop)
if 'SectionProp' in hd.keys():
Props = hd['SectionProp']
Graph.addNodePropertySet('Section')
for ip,P in enumerate(Props):
# PropSetID PropD PropThck
prop= NodeProperty(ID=P[0], D=P[1], t=P[2])
Graph.addNodeProperty('Section',prop)
if 'SmplProp' in hd.keys():
Props = hd['SmplProp']
Graph.addNodePropertySet('Smpl')
for ip,P in enumerate(Props):
# SimplCd SimplCdMG SimplCa SimplCaMG SimplCp SimplCpMG SimplAxCd SimplAxCdMG SimplAxCa SimplAxCaMG SimplAxCp SimplAxCpMG
if len(P)==12:
prop= NodeProperty(ID=ip+1, Cd=P[0], CdMG=P[1], Ca=P[2], CaMG=P[3], Cp=P[4], CpMG=P[5], AxCd=P[6], AxCdMG=P[7], AxCa=P[8], AxCaMG=P[9], AxCp=P[10], AxCpMG=P[11])
elif len(P)==10:
prop= NodeProperty(ID=ip+1, Cd=P[0], CdMG=P[1], Ca=P[2], CaMG=P[3], Cp=P[4], CpMG=P[5], AxCa=P[6], AxCaMG=P[7], AxCp=P[8], AxCpMG=P[9])
else:
raise NotImplementedError()
Graph.addNodeProperty('Smpl',prop)
if 'DpthProp' in hd.keys():
Props = hd['DpthProp']
Graph.addMiscPropertySet('Dpth')
for ip,P in enumerate(Props):
# Dpth DpthCd DpthCdMG DpthCa DpthCaMG DpthCp DpthCpMG DpthAxCd DpthAxCdMG DpthAxCa DpthAxCaMG DpthAxCp DpthAxCpMG
prop= Property(ID=ip+1, Dpth=P[0], Cd=P[1], CdMG=P[2], Ca=P[3], CaMG=P[4], Cp=P[5], CpMG=P[6], AxCd=P[7], AxCdMG=P[8], AxCa=P[9], AxCaMG=P[10], AxCp=P[11], AxCpMG=P[12])
Graph.addMiscProperty('Dpth',prop)
if 'MemberProp' in hd.keys():
# MemberID MemberCd1 MemberCd2 MemberCdMG1 MemberCdMG2 MemberCa1 MemberCa2 MemberCaMG1 MemberCaMG2 MemberCp1 MemberCp2 MemberCpMG1 MemberCpMG2 MemberAxCd1 MemberAxCd2 MemberAxCdMG1 MemberAxCdMG2 MemberAxCa1 MemberAxCa2 MemberAxCaMG1 MemberAxCaMG2 MemberAxCp1 MemberAxCp2 MemberAxCpMG1 MemberAxCpMG2
pass # TODO
# ---------------------- FILLED MEMBERS ------------------------------------------
# 0 NFillGroups - Number of filled member groups (-) [If FillDens = DEFAULT, then FillDens = WtrDens; FillFSLoc is related to MSL2SWL]
# FillNumM FillMList FillFSLoc FillDens
# (-) (-) (m) (kg/m^3)
# ---------------------- MARINE GROWTH -------------------------------------------
# 0 NMGDepths - Number of marine-growth depths specified (-)
# MGDpth MGThck MGDens
# (m) (m) (kg/m^3)
# --- Nodes
Nodes = hd['Joints']
for iNode,N in enumerate(Nodes):
node = Node(ID=N[0], x=N[1], y=N[2], z=N[3])
Graph.addNode(node)
Graph.setNodeNodalProp(node, 'AxCoefs', N[4])
# --- Elements
PropSets=['Smpl','Dpth','Member']
Members = hd['Members']
for ie,E in enumerate(Members):
# MemberID MJointID1 MJointID2 MPropSetID1 MPropSetID2 MDivSize MCoefMod PropPot
EE = E[:5].astype(int)
Type = int(E[6]) # MCoefMod
Pot = E[7].lower()[0]=='t'
elem= Element(EE[0], EE[1:3], CoefMod=PropSets[Type-1], DivSize=E[5], Pot=Pot)
elem.data['object']='cylinder'
elem.data['color'] = type2Color(Pot)
Graph.addElement(elem)
# Nodal prop data
Graph.setElementNodalProp(elem, propset='Section', propIDs=EE[3:5])
if Type==1:
# Simple
Graph.setElementNodalProp(elem, propset='Smpl', propIDs=[1,1])
else:
print('>>> TODO type Depth and member')
return Graph
# --------------------------------------------------------------------------------}
# --- SubDyn Summary file
# --------------------------------------------------------------------------------{
def subdynSumToGraph(data):
"""
data: dict-like object as returned by weio
"""
type2Color=[
(0.1,0.1,0.1), # Watchout based on background
(0.753,0.561,0.05), # 1 Beam
(0.541,0.753,0.05), # 2 Cable
(0.753,0.05,0.204), # 3 Rigid
(0.918,0.702,0.125), # 3 Rigid
]
#print(data.keys())
DOF2Nodes = data['DOF2Nodes']
nDOF = data['nDOF_red']
Graph = GraphModel()
# --- Nodes and DOFs
Nodes = data['Nodes']
for iNode,N in enumerate(Nodes):
if len(N)==9: # Temporary fix
#N[4]=np.float(N[4].split()[0])
N=N.astype(np.float32)
ID = int(N[0])
nodeDOFs=DOF2Nodes[(DOF2Nodes[:,1]==ID),0] # NOTE: these were reindex to start at 0
node = Node(ID=ID, x=N[1], y=N[2], z=N[3], Type=int(N[4]), DOFs=nodeDOFs)
Graph.addNode(node)
# --- Elements
Elements = data['Elements']
for ie,E in enumerate(Elements):
nodeIDs=[int(E[1]),int(E[2])]
# shear_[-] Ixx_[m^4] Iyy_[m^4] Jzz_[m^4] T0_[N]
D = np.sqrt(E[7]/np.pi)*4 # <<< Approximation based on area, TODO use I as well
elem= Element(int(E[0]), nodeIDs, Type=int(E[5]), Area=E[7], rho=E[8], E=E[7], G=E[8], D=D)
elem.data['object']='cylinder'
elem.data['color'] = type2Color[int(E[5])]
Graph.addElement(elem)
#print(self.extent)
#print(self.maxDimension)
# --- Graph Modes
# Very important: sortDim should be None to respect the order of nodes
dispGy, posGy, InodesGy, dispCB, posCB, InodesCB = data.getModes(sortDim=None)
for iMode in range(dispGy.shape[2]):
Graph.addMode(displ=dispGy[:,:,iMode],name='GY{:d}'.format(iMode+1), freq=1/(2*np.pi))
for iMode in range(dispCB.shape[2]):
Graph.addMode(displ=dispCB[:,:,iMode],name='CB{:d}'.format(iMode+1), freq=data['CB_frequencies'][iMode])
#print(Graph.toJSON())
return Graph
if __name__ == '__main__':
import weio
filename='../../data/Monopile/MT100_SD.dat'
# filename='../../_data/Monopile/TetraSpar_SubDyn_v3.dat'
sd = weio.FASTInputFile(filename)
# sd.write('OutMT.dat')
Graph = sd.toGraph()
Graph.divideElements(2)
print(Graph)
print(Graph.sortNodesBy('z'))
# print(Graph.nodalDataFrame(sortBy='z'))
print(Graph.points)
print(Graph.connectivity)
print(Graph)
# import numpy as np
# import matplotlib.pyplot as plt
# from matplotlib import collections as mc
# from mpl_toolkits.mplot3d import Axes3D
# fig = plt.figure()
# ax = fig.add_subplot(1,2,1,projection='3d')
#
# lines=Graph.toLines(output='coord')
# for l in lines:
# # ax.add_line(l)
# ax.plot(l[:,0],l[:,1],l[:,2])
#
# ax.autoscale()
# ax.set_xlim([-40,40])
# ax.set_ylim([-40,40])
# ax.set_zlim([-40,40])
# # ax.margins(0.1)
#
# plt.show()
```
#### File: welib/yams/TNSB.py
```python
import numpy as np
import copy
import os
try:
from .yams import *
except:
from yams import *
from welib.yams.windturbine import rigidBlades
class Structure():
def __init__(self,main_axis='x',theta_tilt=0,theta_yaw=0,theta_cone=0,bTiltBeforeNac=False):
self.main_axis = main_axis
self.theta_tilt = theta_tilt
self.theta_cone = theta_cone
self.theta_yaw = theta_yaw
self.bTiltBeforeNac = bTiltBeforeNac
def compute_RNA(s):
s.M_rot= sum([B.Mass for B in s.Blds])
s.M_RNA= s.M_rot + s.Sft.Mass + s.Nac.Mass;
s.r_NGnac_inN = s.Nac.s_G_inB
s.r_NGhub_inN = s.r_NS_inN + np.dot(s.Nac.R_0b.T, np.dot(s.Sft.R_0b, s.Sft.s_G_inB))
try:
# --------------------------------------------------------------------------------}
# --- New method, requires blade to have "toRigidBody"
# --------------------------------------------------------------------------------{
# --- Rigid Blades (with origin R, using N as "global" ref)
blds_rigid = rigidBlades(s.Blds, r_O = [0,0,0]) # TODO blade origins might be wrong in TNSB
blds_rigid.pos_global = s.r_NR_inN.ravel()
R_NS = R_y(s.theta_tilt) # Rotation fromShaft to Nacelle
blds_rigid.R_b2g = R_NS
# --- Creating "hub" with respect to point N (Sft is wrt S)
#M_hub = ED['HubMass']
#JxxHub_atR = ED['HubIner']
#hub = RigidBody('Hub', M_hub, (JxxHub_atR,0,0), s_OG=r_SGhub_inS, R_b2g=R_NS, s_OP=r_SR_inS, r_O=r_NS_inN)
r_NS_inN=s.Sft.r_O.ravel()-s.Nac.r_O.ravel()
r_SGhub_inS=s.Sft.masscenter # In body coordinates, S, titled
#hub = RigidBody('Hub', s.Sft.mass, J=s.Sft.masscenter_inertia, s_OG=r_SGhub_inS, R_b2g=R_NS, r_O=r_NS_inN)
hub = RigidBody('Hub', s.Sft.mass, J_G=s.Sft.masscenter_inertia, rho_G=s.Sft.masscenter)
#hub.shiftOrigin(R_NS.T.dot(-r_NS_inN))
hub.R_b2g = R_NS
hub.pos_global = r_NS_inN
# --- Rotor = Hub + Blades (with origin R, using N as global ref)
#rot = blds_rigid.combine(s.Sft, R_b2g=R_NS, r_O=blds_rigid.pos_global)
rot = blds_rigid.combine(hub, R_b2g=R_NS, r_O=blds_rigid.pos_global)
rot.name='rotor'
#rotgen = rot.combine(gen, R_b2g=R_NS, r_O=blades.pos_global)
#RNA = rot.combine(gen).combine(nac,r_O=[0,0,0])
RNA = rot.combine(s.Nac,r_O=[0,0,0]).combine(s.Yaw, r_O=[0,0,0])
s.RNA=RNA
s.r_NGrna_inN=RNA.masscenter # in N
s.r_NGrot_inN = rot.masscenter
# Temp storage
s.blds_rigid = blds_rigid
s.rot = rot
s.hub = hub
except:
print('[WARN] TNSB: Failed to compute RNA with the new method, falling back to legacy')
s.r_NGrot_inN = s.r_NR_inN # NOTE approximation neglecting cone, putting all rotor mass at R
s.r_NGrna_inN = 1./s.M_RNA * (s.Nac.Mass*s.r_NGnac_inN + s.Sft.Mass*s.r_NGhub_inN + s.M_rot*s.r_NGrot_inN)
def init_trigger(s):
s.alpha = s.Twr.alpha_couplings
s.iPsi = s.Twr.nf # Index of DOF corresponding to azimuth
# Useful for load computation
s.r_NR_inN = s.r_NS_inN + np.dot(s.Nac.R_0b.T, np.dot(s.Sft.R_0b, s.r_SR_inS))
s.gravity = s.Twr.gravity
s.compute_RNA()
s.nDOF = len(s.q)
s.nShapes_twr = s.Twr.nf
s.nShapes_bld = s.Blds[0].nf
def GF(s,T,x,alpha_y_fact=1):
"""
T is the force along the shaft
"""
if (s.nShapes_twr!=1):
raise NotImplementedError('Number of shape function not 1')
if s.main_axis=='x':
raise NotImplementedError('Main axis along x')
# update tower kinematics
s.Twr.gzf=x
alpha = s.Twr.alpha_couplings
alpha_y=alpha[1]*alpha_y_fact
rhoN_x = s.r_NGrna_inN[0,0]
rhoN_z = s.r_NGrna_inN[2,0]
rNR_x = s.r_NR_inN[0,0]
rNR_z = s.r_NR_inN[2,0]
g = s.gravity
ux1c = 1
vy1c = s.Twr.Bhat_t_bc[1,0] # Bhat_t_bc[1,j]= self.PhiV[j][0,iNode]
ky1c = s.Twr.PhiK[0][0,-1]
Fz_inE =-T*sin(alpha_y + s.theta_tilt) - s.M_RNA*g # TODO potential softening correction
Fx_inE = T*cos(alpha_y + s.theta_tilt)
# Fx_inE = T*cos(s.theta_tilt)
# --- Softening is already in K
# k = x[0] * s.Twr.PhiK[0][0,:]
# vL = x[0] * vy1c
# U = s.Twr.PhiU[0][0,:]
# GF_soft=0
# GF_soft+= np.trapz(+U*k*Fz_inE , s.Twr.s_span)
# GF_soft+= -vL*Fz_inE*ux1c
My_inE = 0
My_inE += s.M_RNA*g*( rhoN_x*cos(alpha_y) + rhoN_z*sin(alpha_y))
My_inE +=T*(rNR_x*sin(s.theta_tilt) + rNR_z*cos(s.theta_tilt) )
GF =0
GF += Fx_inE * ux1c
GF += vy1c* My_inE
# GF = GF_soft
return GF
def GF_lin(s,T,x,bFull=True):
"""
T is the force along the shaft
Fisrt linearization: assumes the sum of alpha_y small
"""
if (s.nShapes_twr!=1):
raise NotImplementedError('Number of shape function not 1')
if s.main_axis=='x':
raise NotImplementedError('Main axis along x')
rhoN_x = s.r_NGrna_inN[0] #[0,0]
rhoN_z = s.r_NGrna_inN[2] #[2,0]
rNR_x = s.r_NR_inN[0,0]
rNR_z = s.r_NR_inN[2,0]
g = s.gravity
ux1c = s.Twr.Bhat_x_bc[1,0]
vy1c = s.Twr.Bhat_t_bc[1,0] # Bhat_t_bc[1,j]= self.PhiV[j][0,iNode]
GF = T*cos(s.theta_tilt)
if bFull:
GF += - T* vy1c * sin(s.theta_tilt) * x[0]
GF += (vy1c**2 * s.M_RNA*g * rhoN_z) * x[0]
GF += T*vy1c*(rNR_x*sin(s.theta_tilt) + rNR_z*cos(s.theta_tilt) )
GF += vy1c * s.M_RNA*g * rhoN_x
return GF
def print_info(s):
print('----------------------------------------------------------------')
print('main_axis :', s.main_axis)
print('gravity :', s.gravity)
print('tilt :', s.theta_tilt*180/np.pi)
print('cone :', s.theta_cone*180/np.pi)
print('yaw :', s.theta_yaw *180/np.pi)
def print_origins(s):
print('Origin T :',s.Twr.r_O.T)
print('Origin N :',s.Nac.r_O.T)
print('Origin R :',s.Blds[0].r_O.T)
print('Origin S :',s.Sft.r_O.T)
def print_RNA(s):
print('----------------- RNA ---------------------------------------')
print('M_RNA ', s.M_RNA)
print('r_NGrna_inN',s.r_NGrna_inN.T)
print(' r_NGnac_inN ',s.r_NGnac_inN.T , 'M_nac',s.Nac.Mass)
print(' r_NGhub_inN ',s.r_NGhub_inN.T , 'M_hub',s.Sft.Mass)
print(' r_NGrot_inN ',s.r_NGrot_inN.T , 'M_rot',s.M_rot)
def print_couplings(s):
print('---------------Couplings ---------------------------------------')
print('Constant: (Bhat_t)')
print(s.Twr.Bhat_t_bc) # Constant
print('Time varying:')
print(s.Twr.alpha_couplings) # Time varying function of Twr.gzf
def __repr__(self):
self.print_info()
self.print_origins()
self.print_RNA()
self.print_couplings()
return ''
# --------------------------------------------------------------------------------}
# --- Creating a TNSB model automatically
# --------------------------------------------------------------------------------{
def auto_assembly(Twr,Yaw,Nac,Gen,Sft,Blds,q,r_ET_inE,r_TN_inT,r_NS_inN,r_SR_inS,main_axis='x',theta_tilt_y=0,theta_yaw=0,theta_cone_y=0,DEBUG=False,bTiltBeforeNac=False):
# TODO Gen
if main_axis=='x':
#R_NS = np.dot(R_y(-tilt_up),R_z(q_psi + np.pi)) # << tilt
if bTiltBeforeNac:
R_cn0 = np.dot(R_x (theta_yaw) , R_y (theta_tilt_y))
R_cs0 = R_z (np.pi)
else:
R_cn0 = R_x (theta_yaw)
R_cs0 = np.dot( R_y(theta_tilt_y) , R_z (np.pi)) # Note: OrientBefore
Shaft_axis='z'
elif main_axis=='z':
if bTiltBeforeNac:
R_cn0 = np.dot(R_z (theta_yaw) , R_y(theta_tilt_y))
R_cs0 = R_x (np.pi)
else:
R_cn0 = R_z (theta_yaw)
R_cs0 = np.dot(R_y(theta_tilt_y) , R_x (np.pi) )# Note: OrientBefore
Shaft_axis='x'
nB=len(Blds)
# Creating reference frame
Grd = GroundBody()
# Connections between bodies
Grd.connectTo(Twr, Point=r_ET_inE, Type='Rigid')
Twr.connectTo(Nac, Point=r_TN_inT, Type='Rigid', RelOrientation = R_cn0 , OrientAfter=True)
Twr.connectTo(Yaw, Point=r_TN_inT, Type='Rigid', RelOrientation = R_cn0 , OrientAfter=True)
Nac.connectTo (Sft , Point=r_NS_inN, Type='SphericalJoint',JointRotations=[Shaft_axis],RelOrientation = R_cs0, OrientAfter=False)
for i,B in enumerate(Blds):
psi_B= -i*2*np.pi/nB # 0 -2pi/2 2pi/3 or 0 pi
if main_axis=='x':
R_SB = R_z(0*np.pi + psi_B)
elif main_axis=='z':
R_SB = R_x(0*np.pi + psi_B)
R_SB = np.dot(R_SB, R_y(theta_cone_y))
Sft.connectTo(B, Point=r_SR_inS, Type='Rigid', RelOrientation = R_SB)
# Setting DOF index for all bodies and connections
nq=Grd.setupDOFIndex(0);
if nq!=len(q):
print('>>> ',nq,len(q))
raise Exception('Wrong number of dof')
Grd.updateChildrenKinematicsNonRecursive(q)
Twr.updateChildrenKinematicsNonRecursive(q)
Yaw.updateChildrenKinematicsNonRecursive(q)
Nac.updateChildrenKinematicsNonRecursive(q)
Sft.updateChildrenKinematicsNonRecursive(q)
# --- Full system
nq = len(q)
MM = np.zeros((nq,nq))
MM = Grd.getFullM(MM)
KK = np.zeros((nq,nq))
KK = Grd.getFullK(KK)
DD = np.zeros((nq,nq))
DD = Grd.getFullD(DD)
MM[np.abs(MM)< 1e-09] = 0
# --- returning everything in a structure class
Struct = Structure(main_axis=main_axis,theta_cone=theta_cone_y,theta_tilt=theta_tilt_y,bTiltBeforeNac=bTiltBeforeNac)
Struct.Grd = Grd
Struct.Twr = Twr
Struct.Yaw = Yaw
Struct.Nac = Nac
Struct.Sft = Sft
Struct.Blds = Blds
Struct.MM = MM
Struct.KK = KK
Struct.DD = DD
Struct.q = q
Struct.r_ET_inE=r_ET_inE
Struct.r_TN_inT=r_TN_inT
Struct.r_NS_inN=r_NS_inN
Struct.r_SR_inS=r_SR_inS
Struct.init_trigger()
return Struct
# --------------------------------------------------------------------------------}
# --- Manual assembly of a TNSB model
# --------------------------------------------------------------------------------{
def manual_assembly(Twr,Yaw,Nac,Gen,Sft,Blds,q,r_ET_inE,r_TN_inT,r_NS_inN,r_SR_inS,main_axis='x',theta_tilt_y=0,theta_cone_y=0,DEBUG=False, bTiltBeforeNac=False):
# Main Parameters
nDOF = len(q)
iPsi = Twr.nf # Index of DOF corresponding to azimuth
# CyT=- np.array([ Twr.PhiV[0][2,-1], 1.5065E-01, 0, 0]) # End value of shapes functions in y direction
CxT= np.zeros(Twr.nf)
CyT= np.zeros(Twr.nf)
CzT= np.zeros(Twr.nf)
UxT= np.zeros(Twr.nf)
UyT= np.zeros(Twr.nf)
UzT= np.zeros(Twr.nf)
for j,(u,v) in enumerate(zip(Twr.PhiU,Twr.PhiV)):
if main_axis=='x':
CyT[j]=-v[2,-1] # A deflection along z gives a negative angle around y
CzT[j]= v[1,-1] # A deflection along y gives a positive angle around z # TODO TODO CHECK ME
UyT[j]= u[1,-1]
UzT[j]= u[2,-1]
#print('Alpha y - mode {}:'.format(j+1),CyT[j])
elif main_axis=='z':
CxT[j]=-v[1,-1] # A deflection along y gives a negative angle around x # TODO TODO CHECK ME
CyT[j]= v[0,-1] # A deflection along x gives a positive angle around y
UxT[j]= u[0,-1]
UyT[j]= u[1,-1]
CyT=CyT[:Twr.nf]
UxT=UxT[:Twr.nf]
# TODO:
# Bt_pc=zeros(3,p.nf);
# for j=1:p.nf
# Bx_pc(:,j)=p.PhiU{j}(:,iNode);
# Bt_pc(:,j)=[0; -p.PhiV{j}(3,iNode); p.PhiV{j}(2,iNode)];
# end
# --------------------------------------------------------------------------------}
## --- "Manual connection"
# --------------------------------------------------------------------------------{
# link E-T
R_ET = np.identity(3)
B_T = np.array([])
# B_T = fBMatRecursion(,np.vstack((Bx_ET,Bt_ET)),R_ET,r_ET_inE)
B_T_inT = fB_inB(R_ET, B_T)
BB_T_inT = fB_aug(B_T_inT, Twr.nf)
MM_T = fBMB(BB_T_inT,Twr.MM)
KK_T = fBMB(BB_T_inT,Twr.KK)
DD_T = fBMB(BB_T_inT,Twr.DD)
Twr.r_O = r_ET_inE
Twr.R_0b = R_ET
Twr.B = B_T
Twr.B_inB = B_T_inT
Twr.BB_inB = BB_T_inT
# ---------------------------------------------
# Link T-N
# TODO
if Twr.nf == 0:
Bx_TN = np.array([])
Bt_TN = np.array([])
alpha_y=0
elif Twr.nf == 1:
if main_axis=='x':
Bx_TN = np.array([[0],[0],[UzT[0]]])
elif main_axis=='z':
Bx_TN = np.array([[UxT[0]],[0],[0]])
Bt_TN = np.array([[0],[CyT[0]],[0]])
Twr.gzf = q[0,0]
alpha_y = np.dot(CyT.ravel(), q[0,0].ravel())
elif Twr.nf == 2:
if main_axis=='x':
Bx_TN = np.array([[0,0],[0,0],[UzT[0],UzT[1]]])
elif main_axis=='z':
Bx_TN = np.array([[UxT[0],UxT[1]],[0,0],[0,0]])
Twr.gzf = q[0:2,0]
Bt_TN = np.array([[0,0],[CyT[0],CyT[1]],[0,0]])
alpha_y = np.dot(CyT.ravel() , q[:2,0].ravel())
else:
# TODO use CzT
raise NotImplementedError()
#print('alpha_y',alpha_y)
R_TN = R_y(alpha_y)
if bTiltBeforeNac:
R_TN = np.dot(R_TN, R_y(theta_tilt_y))
R_EN = np.dot(R_ET, R_TN)
B_N = fBMatRecursion(B_T,Bx_TN,Bt_TN,R_ET,r_TN_inT)
B_N_inN = fB_inB(R_EN, B_N)
BB_N_inN = fB_aug(B_N_inN, Nac.nf)
MM_N = fBMB(BB_N_inN,Nac.MM)
KK_N = fBMB(BB_N_inN,Nac.KK)
Nac.r_O = Twr.r_O + np.dot(Twr.R_0b, r_TN_inT)
Nac.R_0b = R_EN
Nac.B = B_N
Nac.B_inB = B_N_inN
Nac.BB_inB = BB_N_inN
# TODO YAW
MM_Y = fBMB(BB_N_inN,Yaw.MM)
Yaw.r_O = Twr.r_O + np.dot(Twr.R_0b, r_TN_inT)
Yaw.R_0b = R_EN
Yaw.B = B_N
Yaw.B_inB = B_N_inN
Yaw.BB_inB = BB_N_inN
# TODO Gen
# ---------------------------------------------
# Link N-S
q_psi = q[iPsi,0]
if main_axis=='x':
R_NS = R_z(q_psi + np.pi)
elif main_axis=='z':
R_NS = R_x(q_psi + np.pi)
if not bTiltBeforeNac:
R_NS = np.dot(R_y(theta_tilt_y),R_NS)
R_ES = np.dot(R_EN, R_NS)
r_NS = np.dot(R_EN, r_NS_inN)
Bx_NS = np.array([[0],[0],[0]])
if main_axis=='x':
Bt_NS = np.array([[0],[0],[1]])
elif main_axis=='z':
Bt_NS = np.array([[1],[0],[0]])
B_S = fBMatRecursion(B_N,Bx_NS,Bt_NS,R_EN,r_NS)
B_S_inS = fB_inB(R_ES, B_S)
BB_S_inS = fB_aug(B_S_inS, Sft.nf)
MM_S = fBMB(BB_S_inS,Sft.MM)
KK_S = fBMB(BB_S_inS,Sft.KK)
Sft.r_O = Nac.r_O + r_NS
Sft.R_0b = R_ES
Sft.B = B_S
Sft.B_inB = B_S_inS
Sft.BB_inB = BB_S_inS
# ---------------------------------------------
# Link S-B1
nB = len(Blds)
# Point R
r_SR = np.dot(R_ES, r_SR_inS)
B_R = fBMatRecursion(B_S,[],[],R_ES,r_SR)
B_R_bis = fBMatTranslate(B_S,r_SR)
# Points B1, B2, B3
MM_B = np.zeros((nDOF,nDOF))
KK_B = np.zeros((nDOF,nDOF))
DD_B = np.zeros((nDOF,nDOF))
nf_done=0
nf_tot = sum([B.nf for B in Blds])
for i,B in enumerate(Blds):
psi_B= -i*2*np.pi/nB # 0 -2pi/2 2pi/3 or 0 pi
if main_axis=='x':
R_SB = R_z(0*np.pi + psi_B)
elif main_axis=='z':
R_SB = R_x(0*np.pi + psi_B)
R_SB = np.dot(R_SB, R_y(theta_cone_y))
R_EB = np.dot(R_ES, R_SB)
B_B_inB = fB_inB(R_EB, B_R)
BB_B_inB = fB_aug(B_B_inB, nf_tot, B.nf, nf_done)
nf_done += B.nf
# Full matrices
MM_B += fBMB(BB_B_inB,B.MM)
KK_B += fBMB(BB_B_inB,B.KK)
DD_B += fBMB(BB_B_inB,B.DD)
B.r_O = Sft.r_O + r_SR
B.B = B_R
B.B_inB = B_B_inB
B.BB_inB = BB_B_inB
# --- Final assembly
MM = MM_B.copy()
MM[:iPsi+1,:iPsi+1] += MM_S
MM[:Twr.nf,:Twr.nf] += MM_T + MM_N + MM_Y
KK = KK_B
KK[:iPsi+1,:iPsi+1] += KK_S
KK[:Twr.nf,:Twr.nf] += KK_T + KK_N
DD = DD_B
DD[:Twr.nf,:Twr.nf] += DD_T
## Display to screen
MM[np.abs(MM)< 1e-09] = 0
if DEBUG:
print('--------------------- Geom ---------------------')
print('r_ET_inE ',r_ET_inE .T)
print('r_TN_inT ',r_TN_inT .T)
print('r_NS_inN ',r_NS_inN .T)
print('r_SR_inS ',r_SR_inS .T)
print('-------------------- Tower ---------------------')
print('CyT\n',CyT)
print('alpha_y',alpha_y)
print('B_T\n',B_T)
print('B_T_inT\n',B_T_inT)
print('BB_T_inT\n',BB_T_inT)
# print('MM_T\n',MM_T)
# print('KK_T\n',KK_T)
# print('DD_T\n',DD_T)
print('------------------- Nacelle --------------------')
print('B_N\n',B_N)
print('B_N_inN\n',B_N_inN)
print('BB_N_inN\n',BB_N_inN)
print('MM_N\n',MM_N)
# print('-------------------- Shaft ---------------------')
# print('R_NS\n',R_NS)
# print('BB_S_inS\n',BB_S_inS)
# print('MM_S\n',MM_S)
# print('------------------- Blades ---------------------')
# #print('BB_B1_inB1')
# #print(BB_B1_inB1)
# print('MM_B\n',MM_B)
# print('KK_B\n',KK_B)
# print('DD_B\n',DD_B)
# print('-------------------- Full ----------------------')
# print('M ("manually" built)')
# print(MM)
# print('K ("manually" build)')
# print(KK)
## Eigenvalue analysis
#[Q,Lambda]=eig(K,M);
#Omega2=diag(Lambda);
#[Omega2,Isort]=sort(Omega2);
#Q=Q(:,Isort);
#f_eva= sqrt(Omega2)/(2*pi);
#for i=1:length(f_eva);
# fprintf('f%d = %.3f \n',i,f_eva(i))
# --- returning everything in a structure class
Struct = Structure(main_axis=main_axis,theta_cone=theta_cone_y,theta_tilt=theta_tilt_y,bTiltBeforeNac=bTiltBeforeNac)
Struct.Grd = GroundBody()
Struct.Twr = Twr
Struct.Yaw = Yaw
Struct.Nac = Nac
Struct.Sft = Sft
Struct.Blds = Blds
Struct.MM = MM
Struct.KK = KK
Struct.DD = DD
Struct.q = q
Struct.r_ET_inE=r_ET_inE
Struct.r_TN_inT=r_TN_inT
Struct.r_NS_inN=r_NS_inN
Struct.r_SR_inS=r_SR_inS
Struct.init_trigger()
return Struct
if __name__=='__main__':
np.set_printoptions(linewidth=500)
``` |
{
"source": "jingege315/gobang",
"score": 3
} |
#### File: Helper/Base/ChessLocation.py
```python
from .Chess import Chess
class ChessLocation(object):
def __init__(self, x: int, y: int, chess: Chess):
self.x = x
self.y = y
self.chess = chess
```
#### File: Helper/Base/Chess.py
```python
from enum import Enum
class Chess(Enum):
"""
the chess's enum
"""
BLACK = 1
WHITE = 2
NONE = 3
def is_black(self) -> bool:
return self == self.BLACK
def is_white(self) -> bool:
return self == self.WHITE
def is_none(self) -> bool:
return self == self.NONE
def exchange(self):
return self.BLACK if self.is_white() else self.WHITE
def is_chess_color(self) -> bool:
return self.is_black() or self.is_white()
def is_chess(self) -> bool:
return self.is_chess_color() or self.is_none()
def __str__(self) -> str:
if self.is_black():
return '1'
elif self.is_white():
return '2'
else:
return ' '
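# Example: Chess.BLACK.exchange() returns Chess.WHITE, Chess.NONE.is_chess_color() returns False,
# and str(Chess.WHITE) is '2' (the board prints black as '1', white as '2' and empty as ' ').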
```
#### File: Helper/Base/WindowSize.py
```python
class WindowSize(object):
def __init__(self, x1: int, y1: int, x2: int, y2: int):
"""
the UI size to set
:param x1:
:param y1:
:param x2:
:param y2:
"""
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
```
#### File: Helper/Controller/Game.py
```python
from ..Player import *
class Game(object):
"""
control the condition logic and the drawing logic of gobang
"""
def __init__(self, win_size, board_size, callback_win):
"""
:param callback_win: the callback function of winning,param:chess:Chess
"""
self._judge = Judge(win_size)
self.board = BoardSave(board_size)
self._callback_win = callback_win
self._win_now = False
self._chess_now = Chess.BLACK
def is_win(self):
is_win = self._judge.is_win(self.board)
if not self._win_now and is_win:
chess = self._judge.get_win_chess()
self._callback_win(chess)
self._win_now = True
return is_win
def can_move(self, x, y) -> bool:
return not self.board.is_moved(x, y) or self._win_now
def move(self, x, y):
if self.can_move(x, y):
self.board.add(x, y, self._chess_now)
self._chess_now = self._chess_now.exchange()
self.is_win()
def back(self):
if self.board.back():
self._win_now = False
self._chess_now = self._chess_now.exchange()
def back2steps(self):
self.board.back()
self.board.back()
self._win_now = False
def clear(self):
self.board.clear()
self._chess_now = Chess.BLACK
# stand whether win
self._win_now = False
def get_chess_now(self) -> Chess:
return self._chess_now
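# Example usage (a minimal sketch; win_size/board_size come from the Base package and the callback
# receives the winning Chess value):
#   game = Game(win_size, board_size, callback_win=lambda chess: print(chess, 'wins'))
#   game.move(7, 7) # black moves first
#   game.move(7, 8) # then white
#   game.back() # undo the last move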
```
#### File: Helper/Player/PlayerAI.py
```python
from . import Player
from ..Base import *
class PlayerAI(Player):
def __init__(self, chess_self: Chess):
super().__init__(chess_self)
@staticmethod
def is_auto():
return True
def get_scores(self) -> list:
"""
:return: after calling 'getNext' function,the scores in each point considered
"""
raise NotImplementedError()
```
#### File: Helper/Player/Player.py
```python
from ..Base import *
class Player(object):
"""
the player playing gobang can be human or AI
"""
def __init__(self, chess_self: Chess):
self._chess_self = chess_self
def get_next(self, board: BoardSave) -> (int, int):
"""
the player can use the information of board and order to decide how to move in next step
:param board:
:return:
return (x,y):the move about next step
return None:waiting for human click
"""
raise NotImplementedError()
@staticmethod
def is_auto() -> bool:
"""
:return: whether the player is AI to auto move chess
"""
raise NotImplementedError()
def get_chess_color(self) -> Chess:
"""
:return: the chess's color of this player
"""
return self._chess_self
```
#### File: gobang/Test/test_valuablePointLinear.py
```python
from unittest import TestCase
from Helper import *
class TestValuablePointLinear(TestCase):
def test_getPoints(self):
linear = ValuablePointLinear()
board_size = BoardSize(15, 15)
save = BoardSave(board_size)
save.add(1, 1, Chess.BLACK)
save.add(0, 14, Chess.WHITE)
save.add(7, 7, Chess.BLACK)
save.add(8, 8, Chess.WHITE)
print(save)
ret = linear.get_points(save, Chess.BLACK)
# (1,1) -> 8
# (0,14) -> 3
# (7,7)+(8,8) -> 8*2-4=12
assert len(ret) == 8 + 3 + 12
print(ret)
``` |
{
"source": "Jinger52035/geochemistrypy",
"score": 3
} |
#### File: geochemistrypy/model/clustering.py
```python
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
# import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# from matplotlib.ticker import FixedLocator, FixedFormatter
import numpy as np
from utils.base import save_data
from utils.base import save_fig
from global_variable import MODEL_OUTPUT_IMAGE_PATH
from global_variable import DATASET_OUTPUT_PATH
class ClusteringWorkflowBase(object):
name = None
# TODO: build virtualization in 2D, 3D graph and silhouette plot
common_function = ['Cluster Centers',
'Cluster Labels',
'Virtualization in 2D graph',
'Virtualization in 3D graph',
'Silhouette Plot']
special_function = None
@classmethod
def show_info(cls):
print("*-*" * 2, cls.name, "is running ...", "*-*" * 2)
print("Expected Functionality:")
function = cls.common_function + cls.special_function
for i in range(len(function)):
print("+ ", function[i])
def __init__(self):
self.model = None
self.X = None
self.naming = None
def fit(self, X, y=None):
# keep y to be in consistent with the framework
self.X = X
self.model.fit(X)
def get_cluster_centers(self):
print("-----* Clustering Centers *-----")
print(self.model.cluster_centers_)
def get_labels(self):
print("-----* Clustering Labels *-----")
self.X['clustering result'] = self.model.labels_
print(self.X)
save_data(self.X, f"{self.naming}", DATASET_OUTPUT_PATH)
def plot_silhouette_diagram(self, n_clusters: int = 0,
show_xlabel: bool = True, show_ylabel: bool = True, show_title: bool = True):
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
# Set the figure size in inches.
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
# self.X is expected to be a pandas DataFrame here.
len_X = len(self.X)
ax1.set_ylim([0, len_X + (n_clusters + 1) * 10])
# For example:cluster_labels = [4 4 1 ... 0 0 0]
cluster_labels = self.X['clustering result']
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(self.X, cluster_labels)
print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(self.X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(self.X.iloc[:, [0]], self.X.iloc[:, [1]], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Labeling the clusters
centers = self.model.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
print("Successfully graph the Silhouette Diagram.")
save_fig(f"Silhouette Diagram - {self.naming}", MODEL_OUTPUT_IMAGE_PATH)
# plt.show()
def plot_2d_graph(self):
pass
def plot_3d_graph(self,):
pass
class KMeansClustering(ClusteringWorkflowBase):
name = "KMeans"
special_function = ['KMeans Score']
def __init__(self,
n_clusters=8,
init="k-means++",
n_init=10,
max_iter=300,
tol=1e-4,
verbose=0,
random_state=None,
copy_x=True,
algorithm="auto"):
super().__init__()
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.algorithm = algorithm
self.model = KMeans(n_clusters=self.n_clusters,
init=self.init,
n_init=self.n_init,
max_iter=self.max_iter,
tol=self.tol,
verbose=self.verbose,
random_state=self.random_state,
copy_x=self.copy_x,
algorithm=self.algorithm)
self.naming = KMeansClustering.name
def _get_scores(self):
print("-----* KMeans Scores *-----")
print("Inertia Score: ", self.model.inertia_)
print("Calinski Harabasz Score: ", metrics.calinski_harabasz_score(self.X, self.model.labels_))
print("Silhouette Score: ", metrics.silhouette_score(self.X, self.model.labels_))
def special_components(self):
self._get_scores()
self.plot_silhouette_diagram(self.n_clusters)
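# Example usage (a minimal sketch, assuming `data` is a numeric pandas DataFrame of samples x features):
#   model = KMeansClustering(n_clusters=3)
#   KMeansClustering.show_info()
#   model.fit(data)
#   model.get_cluster_centers()
#   model.get_labels()
#   model.special_components() # scores + silhouette diagram saved under MODEL_OUTPUT_IMAGE_PATH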
``` |
{
"source": "jinger7281/sock5py",
"score": 3
} |
#### File: jinger7281/sock5py/slaves.py
```python
import socket
import threading
import sockutils
import struct
class slave(threading.Thread):
socketHandle=None
socketAddr=None
proxySocket=None
def setClientInfo(self,socketHandle,socketAddr):
self.socketHandle=socketHandle
self.socketAddr=socketAddr
def run(self):
try:
cmd=self.socketHandle.recv(3)
if sockutils.step1(cmd)[0] != 0:
print(sockutils.step1(cmd))
exit(-1)
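# SOCKS5 method-selection reply (RFC 1928): version 0x05, method 0x00 = "no authentication required".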
self.socketHandle.send(b'\x05\x00')
cmd=self.socketHandle.recv(10)
step2Ret=sockutils.step2(cmd)
if step2Ret[0] != 0:
exit(-1)
print("线程:",self.name,"连接",step2Ret[1],":",step2Ret[2][0])
step2Hex=struct.pack(">BBBBBBBBH",5,0,0,1,cmd[4],cmd[5],cmd[6],cmd[7],(cmd[8]<<8)|cmd[9])
self.socketHandle.send(step2Hex)
data=b""
while True:
tmp=self.socketHandle.recv(256)
data+=tmp
if len(tmp)<256:
break
if data == b'':
print(self.name,"没有数据,断开连接")
exit()
self.proxySocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.proxySocket.connect((step2Ret[1],step2Ret[2][0]))
self.proxySocket.send(data)
while True:
tmp=self.proxySocket.recv(10240)
self.socketHandle.send(tmp)
if len(tmp)==0:
break
self.proxySocket.close()
self.socketHandle.close()
print("----",self.name,"end----")
except Exception as e:
self.proxySocket.close()
self.socketHandle.close()
print(e)
print("----",self.name,"Exception End----")
``` |
{
"source": "jingerbread/p3_test_driver",
"score": 2
} |
#### File: p3_data/p3_data/p3_common_lib.py
```python
from __future__ import division
from __future__ import print_function
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pylab as plt
import matplotlib.dates as mdates
import matplotlib.cm as cm
import shutil
import os
import itertools
import datetime
import scipy.interpolate
import scipy.stats
import math
import six
import re
import bz2
import codecs
import glob
import json
import re
import dateutil
import tarfile
import io
import IPython.display
import ipywidgets
import copy
import uuid
import warnings
import traceback
import sys
import io
import csv
import logging
import operator
import scipy.constants
from matplotlib import gridspec
from IPython.display import display, HTML
from contextlib import closing
from sklearn.externals.joblib import Parallel, delayed
from collections import namedtuple
from scipy.stats import rv_discrete
from scipy.optimize import OptimizeResult, minimize_scalar
#
# Dataframe manipulation
#
def flatten_multiindex_columns(df):
df = df.copy()
df.columns = ['_'.join([str(x) for x in col]) for col in df.columns.values]
return df
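# Example (hypothetical columns): a MultiIndex column ('elapsed_sec', 'mean') becomes the flat
# column name 'elapsed_sec_mean'.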
def series_to_df(s, series_name=None):
df = pd.DataFrame(s)
if series_name: df.columns = [series_name]
return df
def add_cumsum(df, src_col, new_col):
df[new_col] = df[src_col].cumsum()
return df
def string_list_subtract_regex(string_list, exclude_regex_list):
if exclude_regex_list:
result = []
exclude_regex = '|'.join(['%s$' % x for x in exclude_regex_list])
compiled = re.compile(exclude_regex)
result = [s for s in string_list if not compiled.match(s)]
return result
else:
return string_list
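# Example: string_list_subtract_regex(['size', 'size_kb', 'test'], ['size.*']) returns ['test'].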
def get_varying_column_names(df, exclude_cols=[], ignore_nan=False):
"""Return names of columns with more than one value.
Column names that match regular expressions in exclude_cols will not be returned.
If ignore_nan is True, column values with NaN are not considered different."""
check_cols = string_list_subtract_regex(df.columns, exclude_cols)
take_cols = []
if len(df) > 1:
for col in check_cols:
significant_values = df[col]
if ignore_nan:
significant_values = significant_values[~significant_values.isnull()]
try:
value_counts = significant_values.apply(make_hash).value_counts()
if len(value_counts) > 1:
take_cols.append(col)
except:
# If an error occurs, ignore the column.
pass
return take_cols
def take_varying_columns(df, exclude_cols=[], ignore_nan=False):
take_cols = get_varying_column_names(df, exclude_cols=exclude_cols, ignore_nan=ignore_nan)
return df[take_cols]
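# Example usage (hypothetical column names): keep only the test parameters that actually differ
# between runs, ignoring bookkeeping columns:
#   take_varying_columns(df, exclude_cols=['utc_begin', 'test_uuid'])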
def get_common_column_names(df, exclude_cols=[], ignore_nan=False):
"""Return names of columns with one value. All columns are returned if there are 0 or 1 rows.
Column names that match regular expressions in exclude_cols will not be returned.
If ignore_nan is True, column values with NaN are not considered different."""
check_cols = string_list_subtract_regex(df.columns, exclude_cols)
take_cols = []
if len(df) <= 1:
take_cols = check_cols
else:
for col in check_cols:
significant_values = df[col]
if ignore_nan:
significant_values = significant_values[~significant_values.isnull()]
value_counts = significant_values.apply(make_hash).value_counts()
if len(value_counts) <= 1:
take_cols.append(col)
return take_cols
def take_common_columns(df, exclude_cols=[], ignore_nan=False):
take_cols = get_common_column_names(df, exclude_cols=exclude_cols, ignore_nan=ignore_nan)
return df[take_cols]
def build_mask_string(row, df_name='d'):
row_criteria = []
for col in row.index.values:
value = row[col]
if isinstance(value, str):
col_criteria = "({2}.{0}=='{1}')".format(col, value.replace("'", "\\'"), df_name)
else:
col_criteria = '({2}.{0}=={1})'.format(col, value, df_name)
row_criteria.append(col_criteria)
return '&\n'.join(row_criteria)
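# Example (hypothetical row): a row with test='readrandom' and size=8192 yields something like
# "(d.test=='readrandom')&\n(d.size==8192)", suitable for eval() against a dataframe named 'd'.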
def dataframe_indexes_to_columns(df):
"""
Adds index columns as regular columns if necessary. Modifies df.
"""
for index_index, index_name in enumerate(df.index.names):
if index_name not in df.columns:
df[index_name] = df.index.get_level_values(index_name).values
return df
def dict_list_to_dict(li, key):
return dict(map(lambda x: (x[key], x), li))
def merge_dicts(*dict_args):
'''
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
From: http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
'''
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
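# Example: merge_dicts({'a': 1, 'b': 2}, {'b': 3}) returns {'a': 1, 'b': 3} (later dicts take precedence).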
def format_group_name(columns_names, values):
if isinstance(values, tuple):
formatted_values = [str(int(x)) if type(x) is float else str(x) for x in values]
return ",".join([str(x) for x in formatted_values])
else:
return str(values)
def format_dict(d, attr_sep=',', kv_sep='='):
return attr_sep.join(['%s%s%s'%(k,kv_sep,v) for (k,v) in sorted(d.items())])
def flatten_list(li):
"""See also numpy.ravel()."""
result = []
for item in li:
result.extend(item)
return result
def force_float_or_nan(f):
try:
        return float(f)
except:
return np.nan
def fix_column_types(df, dtypes):
"""Define datatypes for each column. Note that integers/bools will be imported as floats to support NaNs.
Modifies passed dataframe in place."""
# Fix all data types.
for column, dtype in dtypes.items():
if column not in df:
df[column] = np.nan
if dtype == 'datetime':
df[column] = df[column].apply(lambda t: force_utc(t))
elif dtype == 'float':
df[column] = df[column].apply(lambda f: force_float_or_nan(f))
elif dtype in ['str','unicode']:
df[column] = df[column].fillna('').astype('unicode')
elif dtype == 'ascii':
df[column] = df[column].fillna('').astype('str')
elif dtype == 'bool':
            # Keep NaNs but convert any other value to boolean
mask = df[column].isnull()
df.loc[~mask,column] = df.loc[~mask,column].astype(dtype)
elif dtype == 'object':
pass
else:
df[column] = df[column].astype(dtype)
# Convert column names to consistent string type (unicode vs ascii)
    df.columns = [str(c) for c in df.columns.values]
return df
def create_uuid_from_fields(rec, cols):
"""Create a deterministic uuid from specific record fields. This is used when there is not a recorded test_uuid value."""
data = ''
for col in cols:
if col in rec and pd.notnull(rec[col]):
value = str(rec[col])
if value:
data += value
data += '|'
if data == '':
raise Exception('Unable to find any fields to create a deterministic UUID')
gen_uuid = str(uuid.uuid5(uuid.UUID('035abefa-9a3a-402b-b573-d77327c7b532'), data))
print('create_uuid_from_fields: Generated %s from %s' % (gen_uuid, data))
return gen_uuid
def concat_dataframe_dicts(dataframe_dict_list):
"""Concatenate dataframes in matching keys in list of dicts."""
result = {}
keys = [set(d.keys()) for d in dataframe_dict_list if not d is None]
if keys:
keys = set.union(*keys)
for df_name in keys:
result[df_name] = pd.concat([d[df_name] for d in dataframe_dict_list if not d is None and df_name in d])
return result
def filter_dataframe_mask(df, warn_on_no_match=True, **kwargs):
"""Return a mask to filter a dataframe based on keyword arguments.
Values that are tuples will be applied with between().
Lists and scalars will be applied with isin()."""
mask = np.ones(len(df)).astype(bool)
for col, criteria in kwargs.items():
if isinstance(criteria,tuple):
mask = mask & df[col].between(*criteria)
else:
if not isinstance(criteria,list):
criteria = [criteria]
mask = mask & df[col].isin(criteria)
if warn_on_no_match and np.sum(mask) == 0:
print('filter_dataframe: No matching records after filtering on %s=%s' % (col, criteria), file=sys.stderr)
warn_on_no_match = False
return mask
def filter_dataframe(df, warn_on_no_match=True, **kwargs):
"""Filter a dataframe based on keyword arguments.
Values that are tuples will be applied with between().
Lists and scalars will be applied with isin()."""
mask = filter_dataframe_mask(df, warn_on_no_match=warn_on_no_match, **kwargs)
return df[mask]
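# Illustrative usage sketch (hypothetical column names):
#   d = pd.DataFrame({'db_type': ['hive', 'impala', 'hive'], 'elapsed_sec': [10.0, 5.0, 30.0]})
#   filter_dataframe(d, db_type='hive')              # scalar -> isin() semantics
#   filter_dataframe(d, db_type=['hive', 'impala'])  # list -> isin() semantics
#   filter_dataframe(d, elapsed_sec=(5.0, 15.0))     # tuple -> between() semantics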
def reorder_index_columns(df, begin_cols=[], end_cols=[]):
"""Change the left-right order of columns in a multiindex."""
begin_cols = [c for c in begin_cols if c in df.index.names]
end_cols = [c for c in end_cols if c in df.index.names]
index_cols = begin_cols + sorted(list(set(df.index.names) - set(begin_cols) - set(end_cols))) + end_cols
    return df.reset_index().set_index(index_cols).sort_index()
def reorder_dataframe_columns(df, begin_cols=[], end_cols=[], sort_middle=False):
"""Change the order of columns in a dataframe."""
all_cols = list(df.columns)
begin_cols = [c for c in begin_cols if c in all_cols]
end_cols = [c for c in end_cols if c in all_cols]
middle_cols = [c for c in all_cols if c not in begin_cols and c not in end_cols]
if sort_middle:
middle_cols = sorted(middle_cols)
ordered_cols = begin_cols + middle_cols + end_cols
return df[ordered_cols]
def percentile(n):
"""Based on http://stackoverflow.com/questions/17578115/pass-percentiles-to-pandas-agg-function."""
def _percentile(x):
return np.percentile(x, n)
_percentile.__name__ = 'percentile_%s' % n
return _percentile
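# Illustrative usage sketch: percentile() builds a named aggregation function for groupby().agg()
# (hypothetical dataframe 'd'):
#   d.groupby('db_type').agg({'elapsed_sec': [np.median, percentile(95)]})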
def rename_columns(df, column_rename_dict):
"""Rename columns. Modifies passed dataframe in place.
OBSOLETE: Use rename_dataframe_column()"""
for old_column, new_column in column_rename_dict.items():
if old_column in df:
if not new_column is None:
if new_column in df:
mask = (df[new_column].isnull()) & (~df[old_column].isnull())
df.loc[mask,new_column] = df.loc[mask,old_column]
else:
df[new_column] = df[old_column]
del df[old_column]
return df
def rename_dataframe_column(df, columns, level=0):
"""Rename the names of a column.
From http://stackoverflow.com/questions/29369568/python-pandas-rename-single-column-label-in-multi-index-dataframe"""
def rename_apply (x, rename_dict):
try:
return rename_dict[x]
except KeyError:
return x
    if isinstance(df.columns, pd.MultiIndex):
df.columns = df.columns.set_levels([rename_apply(x, rename_dict = columns ) for x in df.columns.levels[level]], level= level)
else:
df.columns = [rename_apply(x, rename_dict = columns ) for x in df.columns ]
return df
def column_name_to_title(s):
s = s.replace('_',' ')
return ' '.join([w[0].upper() + w[1:] for w in s.split(' ')])
def crossjoin_df(df1, df2, multi_index=False, **kwargs):
"""
Make a cross join (cartesian product) between two dataframes by using a constant temporary key.
Also sets a MultiIndex which is the cartesian product of the indices of the input dataframes.
See: https://github.com/pydata/pandas/issues/5401
:param df1 dataframe 1
:param df1 dataframe 2
:param kwargs keyword arguments that will be passed to pd.merge()
:return cross join of df1 and df2
"""
df1['_tmpkey'] = 1
df2['_tmpkey'] = 1
res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)
if multi_index:
res.index = pd.MultiIndex.from_product((df1.index, df2.index))
df1.drop('_tmpkey', axis=1, inplace=True)
df2.drop('_tmpkey', axis=1, inplace=True)
return res
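# Illustrative usage sketch (hypothetical column names):
#   left = pd.DataFrame({'block_size_MiB': [128, 256]})
#   right = pd.DataFrame({'stream_count': [1, 4]})
#   crossjoin_df(left, right)   # -> 4 rows, one per (block_size_MiB, stream_count) combination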
def report_diff_html(x, equal_nan=False):
"""
From http://stackoverflow.com/questions/17095101/outputting-difference-in-two-pandas-dataframes-side-by-side-highlighting-the-d
"""
if x[0]==x[1]:
        return six.text_type(x[0])
elif pd.isnull(x[0]) and pd.isnull(x[1]):
if equal_nan:
return u'nan'
return u'<table style="background-color:#00ff00;font-weight:bold;">'+\
'<tr><td>%s</td></tr><tr><td>%s</td></tr></table>' % ('nan', 'nan')
elif pd.isnull(x[0]) and ~pd.isnull(x[1]):
return u'<table style="background-color:#ffff00;font-weight:bold;">'+\
'<tr><td>%s</td></tr><tr><td>%s</td></tr></table>' % ('nan', x[1])
elif ~pd.isnull(x[0]) and pd.isnull(x[1]):
return u'<table style="background-color:#0000ff;font-weight:bold;">'+\
'<tr><td>%s</td></tr><tr><td>%s</td></tr></table>' % (x[0],'nan')
else:
return u'<table style="background-color:#ff0000;font-weight:bold;">'+\
'<tr><td>%s</td></tr><tr><td>%s</td></tr></table>' % (x[0], x[1])
def compare_dataframes_html(df1, df2, **kwargs):
"""
From http://stackoverflow.com/questions/17095101/outputting-difference-in-two-pandas-dataframes-side-by-side-highlighting-the-d
"""
panel = pd.Panel(dict(df1=df1, df2=df2))
if pd.options.display.max_colwidth < 500:
pd.options.display.max_colwidth = 500 # You need this, otherwise pandas will limit your HTML strings to 50 characters
return HTML(panel.apply(lambda x: report_diff_html(x, **kwargs), axis=0).to_html(escape=False))
#
# File I/O
#
def move_data_files(src, dst):
if not os.path.isdir(dst):
os.mkdir(dst)
for filename in glob.glob(src):
print('Moving ' + filename + ' -> ' + dst)
shutil.move(filename, dst)
def text_files_iterator(src, verbose=False):
"""Iterate over lines in text files. src can contain wildcards. Files may be compressed with bzip2."""
for filename in glob.glob(src):
if verbose: print('Reading %s ' % filename)
ext = os.path.splitext(filename)[1]
if ext == '.bz2':
with closing(bz2.BZ2File(filename, 'rb')) as data_file:
reader = codecs.getreader("utf-8")
for line in reader(data_file):
yield line
else:
with open(filename, 'r') as file:
for line in file:
yield line
def load_json_from_file(filename):
ext = os.path.splitext(filename)[1]
print('Reading %s ext: %s' % (filename, ext))
if ext == '.bz2':
with closing(bz2.BZ2File(filename, 'rb')) as data_file:
reader = codecs.getreader("utf-8")
data = json.load(reader(data_file))
else:
with open(filename) as data_file:
data = json.load(data_file)
return data
def glob_file_list(filespecs):
if not isinstance(filespecs,list):
filespecs = [filespecs]
return sum(map(glob.glob, filespecs), [])
def load_json_records(src, verbose=False, n_jobs=-1):
recs = []
filenames = glob_file_list(src)
print('Loading records from %d files...' % len(filenames))
pjobs = [delayed(load_json_from_file)(filename) for filename in filenames]
file_record_list = Parallel(n_jobs=n_jobs)(pjobs) # list of lists of records
recs = flatten_list(file_record_list)
return recs
def load_json_from_file_as_dataframe(filename, filename_column_name='loaded_filename', ignore_error=False):
try:
df = pd.DataFrame(load_json_from_file(filename))
df[filename_column_name] = filename
return df
except Exception:
print('EXCEPTION while loading JSON file %s: %s' % (filename, traceback.format_exc()), file=sys.stderr)
if ignore_error:
return pd.DataFrame()
else:
raise
def load_json_records_as_dataframe(src, verbose=False, n_jobs=-1, ignore_error=False):
recs = []
filenames = glob_file_list(src)
print('Loading records from %d files...' % len(filenames))
pjobs = [delayed(load_json_from_file_as_dataframe)(filename, ignore_error=ignore_error) for filename in filenames]
df_list = Parallel(n_jobs=n_jobs)(pjobs)
return pd.concat(df_list, ignore_index=True, copy=False, sort=False)
def save_json_to_file(data, filename, sort_keys=False, indent=None, ensure_ascii=False):
ext = os.path.splitext(filename)[1]
temp_file_name = '%s.tmp%s' % os.path.splitext(filename)
if ext == '.bz2':
with closing(bz2.BZ2File(temp_file_name, 'wb')) as data_file:
json.dump(data, data_file, sort_keys=sort_keys, indent=indent, ensure_ascii=ensure_ascii)
else:
with open(temp_file_name, 'w') as data_file:
json.dump(data, data_file, sort_keys=sort_keys, indent=indent, ensure_ascii=ensure_ascii)
os.rename(temp_file_name, filename)
#
# Plotting
#
def plot_groups(df, x_col, y_col, group_by_columns=None, title=None, xlabel=None, ylabel=None, max_legend_items=10,
sort_columns=[0], semilogx=False, semilogy=False, agg=None, xlim=None, ylim=None, xticks=None):
# Cyclers for different plot styles
lines = ['-','--','-.',':']
markers = ['o','s','*','x','D','v','^','<','>','8','p','|']
colors = ['r','b','g','c','m','y','k']
lineCycler = itertools.cycle(lines)
markerCycler = itertools.cycle(markers)
colorCycler = itertools.cycle(colors)
fig = plt.figure(title, figsize=(20,10))
fig.clf()
if title:
fig.suptitle(title, fontsize=12)
num_groups = 0
if group_by_columns is None:
num_groups = 1
df.plot(x=x_col, y=y_col, style=next(markerCycler) + next(colorCycler) + next(lineCycler))
else:
group_by_columns = list(set(group_by_columns) - {x_col})
if sort_columns == [0]:
sort_columns = [x_col]
for name, group in df.groupby(group_by_columns):
num_groups += 1
nameStr = format_group_name(group_by_columns, name)
if len(group) > 100:
style = next(colorCycler) + next(lineCycler)
else:
style = next(markerCycler) + next(colorCycler) + next(lineCycler)
if sort_columns is None:
sorted_group = group
else:
sorted_group = group.sort_values(sort_columns)
plt.plot(sorted_group[x_col].values, sorted_group[y_col].values, style, label=nameStr)
if agg is not None:
agg_df = df.groupby(by=[x_col], as_index=False).agg({y_col: agg})
plt.plot(agg_df[x_col].values, agg_df[y_col].values, 'xk-', label=agg)
axes = plt.gca()
if xlabel is None:
xlabel = x_col
if ylabel is None:
ylabel = y_col
if num_groups <= max_legend_items:
axes.legend(loc='best')
else:
print('plot_groups: not showing legend because num_groups=%d' % num_groups)
if semilogx:
axes.semilogx()
fmt = matplotlib.ticker.ScalarFormatter(useOffset=False)
fmt.set_scientific(False)
axes.xaxis.set_major_formatter(fmt)
if semilogy:
axes.semilogy()
fmt = matplotlib.ticker.ScalarFormatter(useOffset=False)
fmt.set_scientific(False)
axes.yaxis.set_major_formatter(fmt)
if xlim:
axes.set_xlim(xlim)
if ylim:
axes.set_ylim(ylim)
if xticks:
axes.xaxis.set_ticks(xticks)
fig.autofmt_xdate()
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
#plt.show()
return fig
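# Illustrative usage sketch (hypothetical dataframe and column names):
#   fig = plot_groups(results_df, x_col='stream_count', y_col='elapsed_sec',
#                     group_by_columns=['db_type'], semilogy=True, agg=np.mean)
#   fig.savefig('elapsed_by_stream_count.png')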
def show_dataframe_mpl(df, axes, col_labels=True):
"""See also pd.DataFrame.plot(table=True)."""
cellText = list(df.values)
if col_labels:
colLabels=list(df.columns.values)
else:
colLabels = None
tbl = axes.table(cellText=cellText, colLabels=colLabels, loc='center')
axes.axis('off')
# tbl.set_fontsize(5)
# for cell in tbl.get_child_artists():
# cell.set_height(0.04)
# cell.set_linewidth(0.001)
def show_series_mpl(series, axes):
# df = pd.DataFrame(series).reset_index()
df = pd.DataFrame({'index': series.index.values, 'value': series.values})
return show_dataframe_mpl(df, axes, col_labels=False)
def expand_xlim(x, axes, margin=0.0):
"""If necessary, expand x limit to that x-margin and x+margin are visible."""
a, b = axes.get_xlim()
a = min(a, x - margin)
b = max(b, x + margin)
axes.set_xlim(a, b)
def expand_ylim(y, axes, margin=0.0):
"""If necessary, expand y limit to that y-margin and y+margin are visible."""
a, b = axes.get_ylim()
a = min(a, y - margin)
b = max(b, y + margin)
axes.set_ylim(a, b)
#
# Optimization
#
class CachedFunction(object):
"""Caches function calls with the same arguments."""
def __init__(self, fun, record_history=False):
self.fun = fun
self.cached_points = {}
self.record_history = record_history
self.history = [] # ordered history of uncached function evaluations
self.uncached_fev = 0 # number of actual uncached function evaluations (cache misses)
self.cached_fev = 0 # number of cached function calls (cache hits)
def __call__(self, *args, **kwargs):
cache_key = make_hashable((args, kwargs))
# logging.info('cache_key=%s' % str(cache_key))
try:
y = self.cached_points[cache_key]
self.cached_fev += 1
return y
except KeyError:
# logging.info('Calling function to evaluate cache_key=%s' % str(cache_key))
self.uncached_fev += 1
y = self.fun(*args, **kwargs)
self.cached_points[cache_key] = y
if self.record_history:
self.history.append(args + (kwargs, y,))
return y
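# Illustrative usage sketch: wrap an expensive function so repeated calls with identical
# arguments are served from the cache (hypothetical function).
#   slow_square = CachedFunction(lambda x: x ** 2)
#   slow_square(3); slow_square(3)
#   slow_square.uncached_fev   # -> 1 (one real evaluation)
#   slow_square.cached_fev     # -> 1 (one cache hit)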
class SmoothedDiscreteFunction(object):
"""Smoothes a scalar function of a single discrete variable by linear interpolation between points."""
def __init__(self, fun, x_domain):
"""
Args:
x_domain (np.ndarray): Array of values that represent the discrete domain of the function.
Values can have type int or float.
"""
self.fun = fun
self.x_domain = np.sort(x_domain)
def __call__(self, x):
if x < self.x_domain[0] or x > self.x_domain[-1]:
raise ValueError('x=%s is outside the domain [%s,%s]' % (x, self.x_domain[0], self.x_domain[-1]))
x0_index = np.searchsorted(self.x_domain, x, side='right') - 1
if self.x_domain[x0_index] == x:
y = self.fun(x)
logging.info('SmoothedDiscreteFunction(%f) = fun(%f) = %f' % (x, x, y))
return y
X = self.x_domain[x0_index:x0_index+2]
Y = np.array([self.fun(xx) for xx in X])
ifun = scipy.interpolate.interp1d(X, Y, assume_sorted=True, copy=False)
y = ifun([x])[0]
logging.info('SmoothedDiscreteFunction(%f) ~ fun(%s) = %f' % (x, X, y))
return y
class SteppedDiscreteFunction(object):
"""Provided with a scalar function of multiple discrete variables, this will extend the domain
to all real numbers by rounding down to the nearest value in the domain. This is performed for each
dimension separately. This will create multi-dimensional "step" functions that are flat (zero gradient)
except at the points in the original domain, where the gradients may be undefined.
This can be used with `CachedFunction` to round down to the nearest point and cache that point."""
def __init__(self, fun, x_domain):
"""
Args:
x_domain (list(np.ndarray)): Array of values that represent the discrete domain of the function.
Values can have type int or float.
"""
self.fun = fun
self.x_domain = [np.sort(xi_domain) for xi_domain in x_domain]
def convert_x(self, x):
x = np.atleast_1d(x)
assert(len(x) == len(self.x_domain))
x_nearest = np.zeros(len(self.x_domain))
for i in range(len(self.x_domain)):
if x[i] <= self.x_domain[i][0]:
x_nearest[i] = self.x_domain[i][0]
elif x[i] >= self.x_domain[i][-1]:
x_nearest[i] = self.x_domain[i][-1]
else:
xi0_index = np.searchsorted(self.x_domain[i], x[i], side='right') - 1
x_nearest[i] = self.x_domain[i][xi0_index]
return x_nearest
def __call__(self, x):
x_nearest = self.convert_x(x)
y = self.fun(x_nearest)
# logging.info('SteppedDiscreteFunction(%s) ~ fun(%s) = %f' % (x, x_nearest, y))
return y
class PandasSeriesFunction(object):
"""Make a function out of a Pandas Series object."""
def __init__(self, series):
self.series = series
def __call__(self, x):
        return self.series.loc[tuple(np.atleast_1d(x))]
class LoggingFunction(object):
"""This function wrapper will log all function calls."""
def __init__(self, fun=None, name=None):
self.fun = fun
if name is None:
try:
name = fun.__name__
except:
name = 'LoggingFunction'
self.name = name
def __call__(self, *args, **kwargs):
arg_str = [repr(a) for a in args]
        kwarg_str = ['%s=%s' % (k,repr(v)) for k,v in kwargs.items()]
both_str = arg_str + kwarg_str
joined_str = ', '.join(both_str)
if self.fun is None:
logging.info('%s(%s)' % (self.name, joined_str))
else:
result = self.fun(*args, **kwargs)
logging.info('%s(%s) -> %s' % (self.name, joined_str, result))
return result
class defaultlist(list):
"""Based on http://stackoverflow.com/questions/869778/populating-a-list-array-by-index-in-python."""
def __init__(self, iterable=None, default_factory=None):
args = []
if iterable:
args = [iterable]
super(defaultlist, self).__init__(*args)
if default_factory is None:
default_factory = lambda: None
self.default_factory = default_factory
def __setitem__(self, index, value):
size = len(self)
if index >= size:
self.extend(self.default_factory() for _ in range(size, index + 1))
list.__setitem__(self, index, value)
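# Illustrative usage sketch: defaultlist grows on assignment to an out-of-range index,
# filling the gap with default_factory() values.
#   dl = defaultlist(default_factory=lambda: 0)
#   dl[3] = 7
#   list(dl)   # -> [0, 0, 0, 7]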
class ArgsToArrayMap(object):
def __init__(self, arg_map):
"""
Args:
arg_map (list): List of dict with the following keys:
kwarg_name: Name of keyword argument.
arg_number: Position of argument. Must use exactly one of kwarg or arg_number.
array_size: If argument should be a list, specify the size.
value_if_missing: If present, this value will be used by args_to_array if the argument is missing.
If this key is not present, a missing argument will produce an AttributeError exception.
fixed_arg_value: If present, this argument will not be included in the array but array_to_args will include
this fixed value.
dtype: If present, arguments returned by array_to_args() will be converted to this type by
passing to this function as a parameter.
"""
self.arg_map = arg_map
def args_to_array(self, *args, **kwargs):
"""
Returns the array as a list.
"""
a = []
for arg_info in self.arg_map:
if 'fixed_arg_value' not in arg_info:
kwarg_name = arg_info.get('kwarg_name')
if kwarg_name is not None:
if 'value_if_missing' in arg_info:
arg_value = kwargs.get(kwarg_name, arg_info['value_if_missing'])
else:
arg_value = kwargs[kwarg_name]
else:
arg_number = arg_info.get('arg_number')
if arg_number is not None:
if arg_number >= len(args) and 'value_if_missing' in arg_info:
arg_value = arg_info['value_if_missing']
else:
arg_value = args[arg_number]
else:
raise AttributeError('You must specify kwarg_name or arg_number.')
array_size = arg_info.get('array_size')
if array_size is None:
arg_value = [arg_value]
elif len(arg_value) != array_size:
raise ValueError('Value for argument %s has incorrect size.' % str(arg_info))
a.extend(arg_value)
return a
def array_to_args(self, array):
args = defaultlist()
kwargs = {}
a = list(array)
for arg_info in self.arg_map:
if 'fixed_arg_value' in arg_info:
arg_value = arg_info['fixed_arg_value']
else:
num_elements = arg_info.get('array_size', 1)
arg_value = a[0:num_elements]
a = a[num_elements:]
if arg_info.get('array_size') is None:
arg_value = arg_value[0]
dtype = arg_info.get('dtype')
if dtype is not None:
arg_value = dtype(arg_value)
if arg_info.get('kwarg_name') is not None:
kwargs[arg_info.get('kwarg_name')] = arg_value
elif arg_info.get('arg_number') is not None:
args[arg_info.get('arg_number')] = arg_value
return args, kwargs
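# Illustrative usage sketch (hypothetical argument names): ArgsToArrayMap flattens keyword
# arguments into a flat array (e.g. for an optimizer) and expands such an array back into kwargs.
#   m = ArgsToArrayMap([
#       dict(kwarg_name='block_size_MiB', dtype=int),
#       dict(kwarg_name='weights', array_size=2, dtype=np.array),
#   ])
#   m.args_to_array(block_size_MiB=128, weights=[0.2, 0.8])   # -> [128, 0.2, 0.8]
#   m.array_to_args([128, 0.2, 0.8])   # -> ([], {'block_size_MiB': 128, 'weights': array([0.2, 0.8])})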
class AttributesToArray(object):
"""Maps an array to/from object attributes.
>>> def __init__(self):
>>> arg_map = [
>>> dict(kwarg_name='trend_parameters', array_size=self.num_trend_parameters, dtype=np.array),
>>> dict(kwarg_name='decay_parameters', array_size=self.num_decay_parameters, dtype=np.array),
>>> dict(kwarg_name='season_parameters', array_size=self.num_season_parameters, dtype=np.array),
>>> ]
>>> args_to_array_map = ArgsToArrayMap(arg_map)
>>> self.attributes_to_array = AttributesToArray(args_to_array_map, self)
"""
def __init__(self, args_to_array_map, bound_object):
self.bound_object = bound_object
self.args_to_array_map = args_to_array_map
def set_array(self, array):
args, kwargs = self.args_to_array_map.array_to_args(array)
        for k,v in kwargs.items():
self.bound_object.__dict__[k] = v
def get_array(self):
array = self.args_to_array_map.args_to_array(**self.bound_object.__dict__)
return array
class ArgsToArrayFunction(object):
"""This function wrapper will convert a function called with positional and
keyword arguments to a function called with an array.
This is useful when passing to minimize()."""
def __init__(self, fun, args_to_array_map):
"""
Args:
args_to_array_map(ArgsToArrayMap):
"""
self.fun = fun
self.args_to_array_map = args_to_array_map
def __call__(self, array, *extra_args, **override_kwargs):
"""Convert call with array to call with args and kwargs."""
args, kwargs = self.args_to_array_map.array_to_args(array)
kwargs.update(override_kwargs)
args += extra_args
result = self.fun(*args, **kwargs)
return result
def fit_parabola(X, Y):
if not (len(X) == 3 and len(Y) == 3):
raise ValueError()
M = np.matrix(np.array([X**2, X, np.ones(3)]).T)
a,b,c = np.linalg.solve(M,Y) # coefficients of ax**2 + bx + c
return a,b,c
def find_vertex_x_of_positive_parabola(X, Y):
a,b,c = fit_parabola(X,Y)
if a <= 0:
raise ValueError('Parabola not positive')
min_x = -b / (2.0*a)
return min_x
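# Illustrative check: the vertex of y = (x - 2)**2 sampled at x = 1, 2, 3 is recovered at x = 2.
#   X = np.array([1.0, 2.0, 3.0]); Y = (X - 2.0) ** 2
#   find_vertex_x_of_positive_parabola(X, Y)   # -> 2.0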
def discrete_scalar_convex_minimizer(fun, x0, x_domain, args=(), maxfev=None, maxiter=100, callback=None, **options):
"""Minimize a scalar function of a single variable that takes on a finite number of values.
The function must be "non-strictly" convex, meaning that it is possible for f(a) == f(b) around the minimum.
This trivial optimization approach currently begins with the first x in x_domain and moves to
the subsequent x until the function starts increasing.
    This function is NOT recommended. Use `scalar_gap_filling_minimizer` instead.
"""
bestx_index = 0
bestx = x_domain[bestx_index]
besty = fun(bestx)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
niter += 1
testx_index = bestx_index + 1
testx = x_domain[testx_index]
testy = fun(testx, *args)
funcalls += 1
if testy <= besty:
# Still going downhill or flat
bestx_index = testx_index
bestx = testx
besty = testy
else: #if testy > besty:
# We have now started going up
stop = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return OptimizeResult(fun=besty, x=bestx, nit=niter, nfev=funcalls, success=(niter > 1))
def scalar_gap_filling_minimizer(fun, bracket, args=(), tol=1e-6, maxfev=None, maxiter=100, callback=None, verbose=False,
parabolic_method=False, golden_section_method=False, **options):
"""Find a local minimum of a scalar function of a single variable.
The function may have flat spots where f(a) == f(b) for a != b and this method will
attempt to search around and within the flat spots.
The function must have exactly one local minimum in the bracket.
This method maintains a left and right bracket, where the function value is greater than the best known minimum.
    It also maintains a list of best x values; the function values at all of these x values equal the best known minimum.
At each iteration, it finds the largest gap in these x values (including the brackets) and selects
the point in the center of the largest gap.
It will then either adjust the bracket or add to the list of best x values.
The method terminates when the largest gap is less than bracket_tol.
Args:
bracket (tuple): A tuple of the bounds of the function (x_min, x_max).
Optionally, a middle point can be specified and it will be the initial best point.
tol (float): The method terminates when the largest gap is less than this value.
"""
# bestx is a list.
# besty is a scalar and equals f(x) for all x in bestx.
funcalls = 0
# print('parabolic_method=%s,golden_section_method=%s' % (parabolic_method,golden_section_method))
if len(bracket) == 2:
bracket_left_x = bracket[0]
bracket_right_x = bracket[1]
bestx = [np.mean([bracket_left_x, bracket_right_x])]
a = bracket_left_x
b = bracket_right_x
if golden_section_method:
bestx = [b - (b - a) / scipy.constants.golden]
else:
bestx = [np.mean([a, b])]
elif len(bracket) == 3:
bracket_left_x = bracket[0]
bracket_right_x = bracket[2]
bestx = [bracket[1]]
else:
raise ValueError('Invalid bracket')
if not (bracket_left_x <= bestx[0] <= bracket_right_x):
raise ValueError('Invalid bracket')
# Evaluate function at bestx.
besty = fun(bestx[0])
funcalls += 1
# Evaluate function at brackets to determine if they are better than the initial bestx.
bracket_left_y = fun(bracket_left_x, *args)
bracket_right_y = fun(bracket_right_x, *args)
funcalls += 2
if bracket_left_y < besty:
bestx = [bracket_left_x]
besty = bracket_left_y
if bracket_right_y < besty:
bestx = [bracket_right_x]
besty = bracket_right_y
if verbose: logging.info('bracket=(%f,%s,%f); besty=%f' % (bracket_left_x, str(bestx), bracket_right_x, besty))
niter = 0
while niter < maxiter:
niter += 1
X = np.array([bracket_left_x] + bestx + [bracket_right_x])
Y = np.array([bracket_left_y] + [besty] * len(bestx) + [bracket_right_y])
testx = None
testx_index = None
# If we have exactly one bestx, then fit a parabola to the 3 points and test the vertex.
if parabolic_method and len(bestx) == 1:
if verbose: logging.info('Attempting parabolic method')
try:
# Attempt to fit a parabola to the 3 points and find the vertex.
testx = find_vertex_x_of_positive_parabola(X, Y)
if verbose: logging.info('Parabolic method returned testx=%f' % testx)
if testx <= bracket_left_x or testx >= bracket_right_x or testx == bestx[0]:
testx = None
elif testx <= bestx[0]:
testx_index = 0
else:
testx_index = 1
except:
# This will happen if a parabola can't be fit through the 3 points.
# Ignore error and use the gap method below.
testx = None
if testx is None:
# Measure gaps in brackets and bestx and find the largest one.
if verbose: logging.info('Attempting gap method')
gaps = np.diff(X)
testx_index = np.argmax(gaps)
gapsize = gaps[testx_index]
if gapsize < tol:
if verbose: logging.info('Achieved gap size tol')
break
            # Pick a point within the largest gap.
a = X[testx_index]
b = X[testx_index + 1]
if golden_section_method:
golden_distance = (b - a) / scipy.constants.golden
if bool(np.random.randint(low=0, high=2)):
testx = b - golden_distance
else:
testx = a + golden_distance
else:
testx = np.mean([a, b])
if verbose: logging.info('gapsize=%f, len(bestx)=%d, testx=%f' % (gapsize, len(bestx), testx))
assert(testx is not None)
assert(testx_index is not None)
assert(bracket_left_x <= testx <= bracket_right_x)
testy = fun(testx, *args)
funcalls += 1
add_to_bestx = False
if testy < besty:
# Found a point better than all others so far.
# The new bracket will be the points to the immediate left and right of the test point.
bestx = [testx]
besty = testy
bracket_left_x = X[testx_index]
bracket_left_y = Y[testx_index]
bracket_right_x = X[testx_index + 1]
bracket_right_y = Y[testx_index + 1]
elif testy > besty:
# Point is worse than best. Reduce bracket.
if testx_index == 0:
# Test point was adjacent to left bracket.
bracket_left_x = testx
bracket_left_y = testy
elif testx_index == len(X) - 2:
# Test point was adjacent to right bracket.
bracket_right_x = testx
bracket_right_y = testy
else:
# Test point was inside the set of bestx points but is worse than besty.
                # This indicates more than one local minimum or a round-off error.
# We will assume a round off error and handle it as if it had the same besty.
add_to_bestx = True
else:
# Point is same as best. Add it to the bestx list.
add_to_bestx = True
if add_to_bestx:
bestx = sorted(bestx + [testx])
if verbose: logging.info('bracket=(%f,%s,%f); besty=%f' % (bracket_left_x, str(bestx), bracket_right_x, besty))
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
break
# Return the x that is in the median of bestx.
bestx = bestx[int((len(bestx)-1)/2)]
return OptimizeResult(fun=besty, x=bestx, nit=niter, nfev=funcalls, success=(niter > 1))
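# Illustrative usage sketch, mirroring how this minimizer is invoked elsewhere in this module
# (hypothetical objective function):
#   fun = CachedFunction(lambda x: (x - 3.0) ** 2)
#   res = minimize_scalar(fun, bracket=(0.0, 5.0, 10.0), tol=1e-3, method=scalar_gap_filling_minimizer)
#   res.x   # -> approximately 3.0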
def multivariate_gap_filling_minimizer(fun, x0, bounds, args=(), tol=1e-6, maxfev=None, maxiter=2, callback=None, verbose=False, scalar_options={}, **options):
"""It is assumed that there is exactly one local minimum in the domain.
This multivariate method uses `scalar_gap_filling_minimizer` repeatedly along each dimension
for a fixed number of iterations. There is currently no other stopping criteria.
TODO: Use Powell's method to optimize a linear combination of dimensions at a time.
Args:
x0 (np.ndarray): Initial guess.
bounds: (min, max) pairs for each element in x, defining the bounds on that parameter.
        tol: See `scalar_gap_filling_minimizer`.
"""
ndims = len(x0)
if len(bounds) != ndims:
raise ValueError()
bestx = x0
besty = np.inf
niter = 0
funcalls = 0
while niter < maxiter:
niter += 1
for i in range(ndims):
# if verbose:
# logging.info('multivariate_near_convex_minimizer: dimension %d' % (i,))
# Function of single variable that we will optimize during this iteration.
def scalar_fun(x):
testx = bestx
testx[i] = x
return fun(testx)
bracket = (bounds[i][0], bestx[i], bounds[i][1])
optresult = minimize_scalar(scalar_fun, bracket=bracket, tol=tol, method=scalar_gap_filling_minimizer, options=scalar_options)
# if verbose:
# logging.info('minimize_scalar returned x=%f, y=%f' % (optresult.x, optresult.fun))
bestx[i] = optresult.x
besty = optresult.fun
if verbose:
logging.info('multivariate_gap_filling_minimizer: niter=%d, dim=%d, best f(%s) = %f' % (niter, i, str(bestx), besty))
funcalls += optresult.nfev
# if verbose:
# logging.info('multivariate_near_convex_minimizer: niter=%d, best f(%s) = %f' % (niter, str(bestx), besty))
if maxfev is not None and funcalls >= maxfev:
break
return OptimizeResult(fun=besty, x=bestx, nit=niter, nfev=funcalls, success=(niter > 1))
def global_minimizer_spark(fun, x0=None, kwargs_list=[], kwargs_default={}, sc=None, callback=None,
final_eval_kwargs=None, return_all_as_list=False, return_all_as_dataframe=False, **options):
"""Minimize an arbitrary function by evaluating all possible points in parallel using Spark.
Args:
fun: A function that takes kwargs as input and outputs y. If y is a tuple, the first element must be the value
            to minimize. Subsequent values, if any, are returned with the result, allowing extra information to be passed back.
The value to minimize can be any comparable data type.
kwargs_list (list(dict)): List of x points at which to evaluate function.
Elements are dicts that represent kwargs to pass to the function.
kwargs_default (dict): Optional parameters to pass to each function call.
sc (SparkContext): The SparkContext to parallelize the function evaluations.
        return_all_as_list (bool): If True, also returns all function evaluations as a list of (y, x) tuples
            in the fun_eval_list key of the result.
        return_all_as_dataframe (bool): If True, also returns all function evaluations as a dataframe
            in the df key of the result.
final_eval_kwargs (dict): If specified, re-evaluate the function at the minimum point but with this
additional set of kwargs.
x0: Not used but needed as a placeholder when called by scipy.optimize.minimize.
"""
# Create RDD of function parameters.
# We maintain two sets of parameters.
# kwargs_point varies between points.
# kwargs_full includes kwarg_point and adds kwargs_default.
params = []
for kwargs_point in kwargs_list:
kwargs_full = kwargs_default.copy()
kwargs_full.update(kwargs_point)
params.append((kwargs_point, kwargs_full))
params_rdd = sc.parallelize(params)
# Evaluate function at each point in parallel using Spark.
fun_eval_rdd = params_rdd.map(lambda param: (fun(**param[1]), param[0]))
# Find the minimum y value. Secondary sort on the x value for tie breaking.
best_y, best_x_kwargs = fun_eval_rdd.min(lambda x: (x[0][0],x[1]) if isinstance(x[0],tuple) else (x[0],x[1]))
    result = OptimizeResult(x=best_x_kwargs, fun=best_y, nfev=len(params), success=True)
if return_all_as_list:
fun_eval_list = fun_eval_rdd.collect()
result['fun_eval_list'] = fun_eval_list
if return_all_as_dataframe:
fun_eval_list = fun_eval_rdd.collect()
df = pd.DataFrame([x for y,x in fun_eval_list])
        df['fun'] = [y[0] if isinstance(y, tuple) else y for y, x in fun_eval_list]
result['df'] = df
if final_eval_kwargs:
kwargs_full = kwargs_default.copy()
kwargs_full.update(best_x_kwargs)
kwargs_full.update(final_eval_kwargs)
best_y = fun(**kwargs_full)
result['fun'] = best_y
return result
#
# Misc functions
#
def regex_groups(regex, s, return_on_no_match=None, flags=0, search=False):
if isinstance(s, six.string_types):
if search:
m = re.search(regex, s, flags=flags)
else:
m = re.match(regex, s, flags=flags)
if m:
return m.groups()
return return_on_no_match
def regex_first_group(regex, s, return_on_no_match=None, flags=0, search=False):
g = regex_groups(regex, s, return_on_no_match=[return_on_no_match], flags=flags, search=search)
return g[0]
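# Illustrative usage sketch:
#   regex_first_group('Fetched ([0-9]+) row', 'Fetched 42 row(s) in 0.98s')              # -> '42'
#   regex_first_group('Time taken: ([0-9.]+)', 'no match here', return_on_no_match='0')  # -> '0'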
def print_arg(x):
print(x)
return x
def make_hash(o):
"""
Makes a hash from a dictionary, list, tuple or set to any level, that contains
only other hashable types (including any lists, tuples, sets, and
dictionaries).
Based on http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
"""
return hash(make_hashable(o))
def make_hashable(o):
"""
Makes a hashable object from a dictionary, list, tuple or set to any level, that contains
only other hashable types (including any lists, tuples, sets, and
dictionaries).
Based on http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
"""
if isinstance(o, (set, tuple, list, np.ndarray)):
return tuple([make_hashable(e) for e in o])
try:
if np.isnan(o):
            # This prevents np.nan from having the same hash as 0 or False.
return '1b3c4484-61dd-4623-b60a-a4789eacecbd'
except:
pass
try:
if pd.isnull(o):
            # This prevents None from having the same hash as 0 or False. Also ensures that hash(None) is consistent.
return 'c3fe5446-ec19-4ec8-807a-8e739f9fc6b6'
except:
pass
if isinstance(o, dict):
new_o = copy.deepcopy(o)
for k, v in new_o.items():
new_o[k] = make_hashable(v)
return tuple(frozenset(sorted(new_o.items())))
else:
return o
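# Illustrative usage sketch: make_hash() gives equal hashes for equal nested content,
# regardless of dict key order or list/tuple container type.
#   make_hash({'db_type': 'hive', 'hosts': ['a', 'b']}) == make_hash({'hosts': ('a', 'b'), 'db_type': 'hive'})   # -> True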
def force_utc(t):
if isinstance(t,pd.Timestamp):
if t.tz:
result = t.tz_convert('UTC')
else:
result = t.tz_localize('UTC')
else:
result = pd.to_datetime(t, utc=True)
    if not isinstance(result, pd.Timestamp) and result is not pd.NaT:
warnings.warn('force_utc: Conversion from %s resulted in type %s' % (type(t), type(result)))
assert False
return result
def linear_transform(x, x0, x1, y0, y1):
return (x - x0) * (y1 - y0) / (x1 - x0) + y0
def linear_fit_to_2_points(point0, point1):
"""Return a linear estimator that passes through the points."""
x0,y0 = point0
x1,y1 = point1
return lambda x: (x - x0) * (y1 - y0) / (x1 - x0) + y0
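# Illustrative usage sketch:
#   linear_transform(5.0, 0.0, 10.0, 0.0, 100.0)   # -> 50.0 (map x in [0, 10] onto [0, 100])
#   to_percent = linear_fit_to_2_points((0.0, 0.0), (10.0, 100.0))
#   to_percent(2.5)   # -> 25.0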
def read_overrides(override_files):
"""Read files in override directory."""
filenames = glob_file_list(override_files)
filenames.sort()
dict_list = [load_json_from_file(filename) for filename in filenames]
override_dict = merge_dicts(*dict_list)
return override_dict
def make_namedtuple(**kwargs):
"""Quickly make a namedtuple instance.
Alternatively, use pd.Series(dict())."""
cls = namedtuple('namedtuple', kwargs.keys())
return cls(*kwargs.values())
def instance_method_wrapper(obj, method_name, *args, **kwargs):
"""Wraps an object instance method within a static method.
This can be used when pickling is needed such as for multiprocessing.
Example:
from sklearn.externals.joblib import Parallel, delayed
pjobs = [delayed(instance_method_wrapper)(self, 'instance_method_name', arg1, arg2) for i in range(3)]
return Parallel(n_jobs=1)(pjobs)
"""
method = getattr(obj, method_name)
return method(*args, **kwargs)
def enable_logging(level=logging.INFO, warnlevel=logging.WARN):
"""Enable logging to iPython notebook."""
rootLogger = logging.getLogger()
rootLogger.setLevel(level)
while rootLogger.handlers:
rootLogger.removeHandler(rootLogger.handlers[0])
errHandler = logging.StreamHandler(sys.stderr)
errHandler.setLevel(warnlevel)
errHandler.setFormatter(logging.Formatter('[%(levelname)-5.5s] [%(module)s] %(message)s'))
rootLogger.addHandler(errHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setLevel(level)
consoleHandler.setFormatter(logging.Formatter('[%(levelname)-5.5s] [%(module)s] %(message)s'))
rootLogger.addHandler(consoleHandler)
def disable_logging():
enable_logging(level=logging.WARN)
#
# End
#
```
#### File: p3_test_driver/disabled_plugins/p3_test_sql.py
```python
from __future__ import division
import datetime
import logging
import multiprocessing
import random
import re
import sys
import uuid
import queue as Queue
import numpy as np
# P3 Libraries
from p3_test_driver import p3_plugin_manager
from p3_test_driver.hadoop_util import kill_all_yarn_jobs, kill_yarn_job
from p3_test_driver.p3_test import TimeoutException
from p3_test_driver.p3_test_hadoop import HadoopTest
from p3_test_driver.p3_util import regex_first_group, record_result, read_file_to_string, glob_file_list
from p3_test_driver.system_command import system_command, time_duration_to_seconds
_default_configs = {
'sqlbatch': {
'queries_per_stream': 0,
'random_seed': 0,
'stream_count': 1,
}
}
class PluginInfo(p3_plugin_manager.IP3Plugin):
def get_plugin_info(self):
return [
{
'class_type': 'test',
'class_name': 'sqlquery',
'class': SqlQueryTest,
},
{
'class_type': 'test',
'class_name': 'sqlbatch',
'class': SqlBatchTest,
},
]
class SqlTest(HadoopTest):
def __init__(self, test_config, default_configs=_default_configs):
super(SqlTest, self).__init__(test_config, default_configs=default_configs)
def configure_environment(self):
config = self.test_config
super(SqlTest, self).configure_environment()
db_type = config['db_type']
if db_type == 'hawq':
if config.get('restart_hawq',False):
system_command('/etc/init.d/hawq stop')
system_command('/etc/init.d/hawq start')
elif db_type == 'impala':
cmd = []
cmd.extend(['impala-shell'])
cmd.extend(['--impalad', '%s:%d' % (config.get('impalad_host','localhost'), config.get('impalad_port',21000))])
cmd.extend(['--database', self.db_name()])
cmd.extend(['-q', 'invalidate metadata'])
system_command(cmd,
print_command=True,
print_output=True,
raise_on_error=True,
shell=False)
def db_name(self):
config = self.test_config
return config['db_name'] % config
class SqlQueryTest(SqlTest):
def __init__(self, test_config, default_configs=_default_configs):
super(SqlQueryTest, self).__init__(test_config, default_configs=default_configs)
def run_test(self):
config = self.test_config
# If run in the optimizer, we don't want to run much longer than the best sample.
if config.get('optimizer_set_timeout') and 'optimizer_best_query_elapsed_sec_mean' in config and 'optimizer_best_query_elapsed_sec_std' in config:
max_query_elapsed_sec = config['optimizer_best_query_elapsed_sec_mean'] + 3*config['optimizer_best_query_elapsed_sec_std']
config['command_timeout_sec'] = 30.0 + max_query_elapsed_sec
logging.info('SqlQueryTest.run_test: Setting command timeout to %0.0f seconds' % config['command_timeout_sec'])
config['_status_node'].set_status('Running query %s' % config['query_filename'])
self.hadoop_authenticate()
self.configure_environment()
with self.metrics_collector_context():
self.start_metrics()
rec = run_query(config)
record_result(rec, rec['result_filename'])
if rec['command_timed_out']:
raise TimeoutException()
if rec['error']:
raise Exception('Query failed')
class SqlBatchTest(SqlTest):
def __init__(self, test_config, default_configs=_default_configs):
super(SqlBatchTest, self).__init__(test_config, default_configs=default_configs)
def run_test(self):
config = self.test_config
config['root_test_uuid'] = config['test_uuid']
child_messages = {}
# Create random query list for each stream
config['query_filenames'] = sorted(glob_file_list(config['query_filespec']))
random.seed(config['random_seed'])
stream_configs = []
queries_per_stream = config.get('queries_per_stream',0)
for stream_id in range(0, config.get('stream_count',1)):
stream_config = config.copy()
stream_config['stream_id'] = stream_id
            if config['random_seed'] != 0:
                # Shuffle a per-stream copy so streams do not share (and repeatedly reshuffle) the same list object.
                stream_config['query_filenames'] = list(stream_config['query_filenames'])
                random.shuffle(stream_config['query_filenames'])
if queries_per_stream > 0:
stream_config['query_filenames'] = stream_config['query_filenames'][0:queries_per_stream]
logging.info('Queries for stream %d: %s' % (stream_config['stream_id'], ' '.join(stream_config['query_filenames'])))
stream_configs.append(stream_config)
self.hadoop_authenticate()
self.configure_environment()
with self.metrics_collector_context():
self.start_metrics()
error_count = 0
success_count = 0
t0 = datetime.datetime.utcnow()
# Start stream processes
active_streams = {}
queue = multiprocessing.Queue()
for stream_config in stream_configs:
stream_config = stream_config.copy()
del stream_config['_status_node'] # We can't send this between processes.
stream_id = stream_config['stream_id']
process = multiprocessing.Process(target=run_query_stream, args=(queue, stream_config))
process.start()
active_streams[stream_id] = {'process': process, 'stream_config': stream_config}
# Monitor stream processes
while len(active_streams.keys()) > 0:
# Update status
status_text = 'successful queries=%d, errors=%d' % (success_count, error_count)
status_node = config['_status_node']
status_node.set_status(status_text, destroy_children=False)
# Handle any completed stream processes
                for stream_id in list(active_streams.keys()):
process = active_streams[stream_id]['process']
if not process.is_alive():
logging.info('Stream %d is done' % stream_id)
process.join()
return_code = process.exitcode
if return_code != 0:
                            # An uncaught exception has occurred. Normal query failures are not handled here.
logging.error('Stream %d returned error %d' % (stream_id, return_code))
error_count += 1
del active_streams[stream_id]
# Process messages (individual query results, stream results) from stream processes
try:
while True:
# Wait up to 1 second for next message in queue.
message = queue.get(True, 1)
# Create a new test_uuid for this child record.
# The query batch test_uuid is in root_test_uuid.
message['record_uuid'] = str(uuid.uuid4())
message['test_uuid'] = message['record_uuid']
# Record individual message to a file for immediate visibility.
record_result(message, message['result_filename'])
# Also add to child_messages key of the query batch record.
record_type = message['record_type']
if record_type not in child_messages:
child_messages[record_type] = []
child_messages[record_type].append(message)
# Count successful and error queries.
if message['record_type'] == 'query_result':
if message['error']:
error_count += 1
else:
success_count += 1
except Queue.Empty:
pass
except KeyboardInterrupt:
raise
except:
logging.error('Unexpected error: %s' % sys.exc_info()[0])
t1 = datetime.datetime.utcnow()
td = t1 - t0
logging.info('All streams are done')
rec = config.copy()
rec['record_uuid'] = rec['test_uuid']
rec['record_type'] = 'query_batch_summary'
rec['utc_begin'] = t0.isoformat()
rec['utc_end'] = t1.isoformat()
rec['elapsed_sec'] = time_duration_to_seconds(td)
rec['error'] = (error_count > 0)
rec['child_messages'] = child_messages
record_result(rec, rec['result_filename'])
logging.info('successful queries=%d, errors=%d' % (success_count, error_count))
if rec['error']:
raise Exception('Query batch failed')
def run_query_stream(queue, stream_config):
stream_id = stream_config['stream_id']
logging.info('%d: Stream begin' % stream_id)
t0 = datetime.datetime.utcnow()
stream_error = False
for query_index, query_filename in enumerate(stream_config['query_filenames']):
logging.info('%d: query_index=%d, query_filename=%s' % (stream_id, query_index, query_filename))
query_config = stream_config.copy()
del query_config['query_filenames']
query_config['query_index'] = query_index
query_config['query_filename'] = query_filename
run_query(query_config)
if query_config['error']: stream_error = True
# Place query_result record in queue. These will be collected and recorded by SqlBatchTest.run_test().
queue.put(query_config)
t1 = datetime.datetime.utcnow()
td = t1 - t0
rec = stream_config.copy()
rec['record_type'] = 'query_stream_summary'
rec['utc_begin'] = t0.isoformat()
rec['utc_end'] = t1.isoformat()
rec['elapsed_sec'] = time_duration_to_seconds(td)
rec['error'] = stream_error
# Place query_stream_summary record in queue. These will be collected and recorded by SqlBatchTest.run_test().
queue.put(rec)
logging.info('%d: Stream end' % stream_id)
def run_query(query_config):
rec = query_config
print_output = rec.get('print_output',True)
stream_id = rec.get('stream_id', 0)
rec['db_name'] = rec['db_name'] % rec
if rec.get('kill_all_yarn_jobs_before_each_query',False):
kill_all_yarn_jobs()
rec['query_filename_contents'] = read_file_to_string(rec['query_filename'])
shell = False
db_type = rec['db_type']
# Build query command.
if db_type == 'hawq':
cmd = []
cmd.extend(['psql'])
cmd.extend(['-v', 'ON_ERROR_STOP=1'])
cmd.extend(['-d', rec['db_name']])
cmd.extend(['-tAf', rec['query_filename']])
elif db_type == 'hive':
if not 'hiveconf:hive.tez.java.opts' in rec and 'java_opts_xmx_ratio' in rec and 'hiveconf:hive.tez.container.size' in rec:
rec['hiveconf:hive.tez.java.opts'] = '-Xmx%dm' % (rec['hiveconf:hive.tez.container.size'] * rec['java_opts_xmx_ratio'])
hiveconf = []
for k,v in rec.items():
prop = regex_first_group('^hiveconf:(.*)', k)
if prop:
hiveconf.extend(['--hiveconf','"%s=%s"' % (prop, v)])
cmd = []
cmd.extend(['hive'])
cmd.extend(['--database', rec['db_name']])
cmd.extend(['-f', rec['query_filename']])
if 'hive_init_file' in rec:
cmd.extend(['-i', rec['hive_init_file']])
# Record contents of file in result.
rec['hive_init_file_contents'] = read_file_to_string(rec['hive_init_file'])
cmd.extend(hiveconf)
elif db_type == 'impala':
cmd = []
cmd.extend(['impala-shell'])
cmd.extend(['--impalad', '%s:%d' % (rec.get('impalad_host','localhost'), rec.get('impalad_port',21000))])
cmd.extend(['--database', rec['db_name']])
cmd.extend(['-f', rec['query_filename']])
cmd.extend(['-B']) # turn off pretty printing
cmd.extend(['-o', '/dev/null'])
if rec.get('profile_query'):
cmd.extend(['--show_profiles'])
else:
        raise Exception('Unknown db_type')
logging.info('%d: # %s' % (stream_id, ' '.join(cmd)))
rec['query_command'] = cmd
t0 = datetime.datetime.utcnow()
# Run query.
return_code, output, errors = system_command(cmd,
print_command=False,
print_output=print_output,
timeout=rec.get('command_timeout_sec',None),
raise_on_error=False,
shell=shell)
t1 = datetime.datetime.utcnow()
td = t1 - t0
rec['utc_begin'] = t0.isoformat()
rec['utc_end'] = t1.isoformat()
rec['elapsed_sec'] = time_duration_to_seconds(td)
rec['error'] = (return_code != 0)
rec['exit_code'] = return_code
rec['command_timed_out'] = (return_code == -1)
rec['output'] = output
rec['errors'] = errors
rec['record_type'] = 'query_result'
# Parse query output to determine elapsed time and rows returned.
if db_type == 'hive':
rec['application_id'] = regex_first_group('\\(Executing on YARN cluster with App id (application_.*)\\)$',
errors, return_on_no_match=None, search=True, flags=re.MULTILINE)
        # Extract actual query duration from stderr text. Note that we must find the last occurrence of 'Time taken'.
query_elapsed_sec = regex_first_group(
'Time taken: ([0-9.]+) seconds',
errors, return_on_no_match='nan', search=True, flags=re.MULTILINE, match_last=True)
if query_elapsed_sec == 'nan':
logging.warn('Time taken not returned by command.')
rec['error'] = True
rec['query_elapsed_sec'] = float(query_elapsed_sec)
rec['non_query_elapsed_sec'] = rec['elapsed_sec'] - rec['query_elapsed_sec']
# Extract row count from stderr text. Note that some queries will not report fetched rows.
query_rows_returned = regex_first_group(
'Fetched: ([0-9]+) row',
errors, return_on_no_match='0', search=True, flags=re.MULTILINE)
rec['query_rows_returned'] = int(query_rows_returned)
logging.info('error=%d, query_elapsed_sec=%f, non_query_elapsed_sec=%f, query_rows_returned=%d' %
(rec['error'], rec['query_elapsed_sec'], rec['non_query_elapsed_sec'], rec['query_rows_returned']))
elif db_type == 'impala':
# Extract actual query duration from stderr text.
# Fetched 100 row(s) in 0.98s
query_elapsed_sec = regex_first_group(
'Fetched [0-9]+ row\\(s\\) in ([0-9.]+)s',
errors, return_on_no_match='nan', search=True, flags=re.MULTILINE, match_last=True)
if query_elapsed_sec == 'nan':
logging.warn('Time taken not returned by command.')
rec['error'] = True
rec['query_elapsed_sec'] = float(query_elapsed_sec)
rec['non_query_elapsed_sec'] = rec['elapsed_sec'] - rec['query_elapsed_sec']
# Extract row count from stderr text. Note that some queries will not report fetched rows.
query_rows_returned = regex_first_group(
'Fetched ([0-9]+) row\\(s\\)',
errors, return_on_no_match='0', search=True, flags=re.MULTILINE)
rec['query_rows_returned'] = int(query_rows_returned)
logging.info('error=%d, query_elapsed_sec=%f, non_query_elapsed_sec=%f, query_rows_returned=%d' %
(rec['error'], rec['query_elapsed_sec'], rec['non_query_elapsed_sec'], rec['query_rows_returned']))
else:
rec['query_elapsed_sec'] = rec['elapsed_sec']
rec['non_query_elapsed_sec'] = 0.0
rec['query_rows_returned'] = np.nan
# Handle errors.
if rec['error']:
logging.info('%d: return_code=%d' % (stream_id, return_code))
if not print_output:
logging.info('%d: %s' % (stream_id, output))
if db_type == 'hive':
# Kill YARN application
if rec['application_id']:
kill_yarn_job(rec['application_id'])
if errors != '':
if not print_output:
logging.info('%d: %s' % (stream_id, errors))
if not rec['error']:
logging.info('%d: %s: %0.3f seconds' % (stream_id, rec['query_filename'], rec['elapsed_sec']))
return rec
```
#### File: p3_test_driver/p3_test_driver/p3_plugin_manager.py
```python
from __future__ import division
import logging
import os
from yapsy.PluginManager import PluginManager
from yapsy.PluginFileLocator import PluginFileLocator, PluginFileAnalyzerMathingRegex
from yapsy.IPlugin import IPlugin
from . import p3_test_driver
_plugin_toc = {}
def _get_item_key(class_type, class_name):
return '%s:%s' % (class_type, class_name)
def _get_plugin_dirs():
logging.info(p3_test_driver.__file__)
maindir = os.path.dirname(os.path.realpath(p3_test_driver.__file__))
return [
os.path.join(maindir, 'plugins'),
os.path.join(os.getcwd(), 'p3_test_driver_plugins'),
]
def scan_plugins():
global _plugin_toc
_plugin_toc = {}
# Load the plugins from the plugin directory.
analyzer = PluginFileAnalyzerMathingRegex('', '.*\\.py$')
plugin_locator = PluginFileLocator(analyzers=[analyzer])
manager = PluginManager(plugin_locator=plugin_locator)
plugin_dirs = _get_plugin_dirs()
logging.info('Loading plugins in %s' % str(plugin_dirs))
manager.setPluginPlaces(plugin_dirs)
manager.collectPlugins()
# Loop round the plugins and print their names.
for plugin in manager.getAllPlugins():
plugin_info = plugin.plugin_object.get_plugin_info()
for item in plugin_info:
k = _get_item_key(item['class_type'], item['class_name'])
_plugin_toc[k] = item
logging.debug('p3_plugin_manager: _plugin_toc=%s' % _plugin_toc)
def get_class_property(class_type, class_name, property_name, default=None):
global _plugin_toc
k = _get_item_key(class_type, class_name)
print('k=%s, property_name=%s' % (k, property_name))
value = _plugin_toc[k].get(property_name, default)
print('value=%s' % value)
return value
def get_class(class_type, class_name):
global _plugin_toc
# print('p3_plugin_manager: _plugin_toc=%s' % _plugin_toc)
# print('class_type=%s class_name=%s' %(class_type, class_name))
k = _get_item_key(class_type, class_name)
return _plugin_toc[k]['class']
class IP3Plugin(IPlugin):
def get_plugin_info(self):
return []
```
#### File: p3_test_driver/plugins/p3_storage_isilon.py
```python
from __future__ import division
import json
import logging
import requests
# P3 Libraries
from p3_test_driver import p3_plugin_manager
from p3_test_driver.p3_storage import StorageBase
from p3_test_driver.system_command import ssh
from p3_test_driver.p3_util import unflatten_dict_keys, regex_first_group
class PluginInfo(p3_plugin_manager.IP3Plugin):
def get_plugin_info(self):
return [
{'class_type': 'storage', 'class_name': 'isilon', 'class': IsilonStorage},
]
class IsilonStorage(StorageBase):
def configure(self):
config = self.config
self.get_isilon_version()
# TODO: Need to set HDFS block size for specific access zone.
block_size_MiB = config.get('block_size_MiB', None)
if block_size_MiB:
self.run_isilon_command('isi hdfs settings modify --default-block-size %dMB' % block_size_MiB)
# Note that --server-threads is not supported on OneFS 8.0.
isilon_hdfs_server_threads = config.get('isilon_hdfs_server_threads', None)
if isilon_hdfs_server_threads:
self.run_isilon_command('isi hdfs settings modify --server-threads %d' % isilon_hdfs_server_threads)
# Note that --server-log-level is not supported on OneFS 8.0.
isilon_hdfs_log_level = config.get('isilon_hdfs_log_level', None)
if isilon_hdfs_log_level:
self.run_isilon_command('isi hdfs settings modify --server-log-level %s' % isilon_hdfs_log_level)
sysctl_settings = unflatten_dict_keys(config, 'isilon_sysctl_(.*)')
if sysctl_settings:
args = ['isi_sysctl_cluster %s="%s"' % (k,v) for k,v in sysctl_settings.iteritems()]
cmd = ' && '.join(args)
self.run_isilon_command(cmd)
# TODO: Need to bring in code for resizing cluster from hadoop-test-driver.pl
requested_isilon_num_nodes = config.get('isilon_num_nodes')
if requested_isilon_num_nodes:
self.get_isilon_node_info()
actual_isilon_num_nodes = self.get_num_isilon_nodes(config['isilon_node_info'])
logging.info('IsilonStorage.configure: actual_isilon_num_nodes=%d' % actual_isilon_num_nodes)
if actual_isilon_num_nodes != requested_isilon_num_nodes:
raise Exception('Actual isilon_num_nodes (%d) does not match requested isilon_num_nodes (%d)' % (actual_isilon_num_nodes, requested_isilon_num_nodes))
self.get_info()
self.flush_cache()
def get_info(self):
config = self.config
self.get_isilon_node_info()
self.get_isilon_access_zone_info()
self.get_isilon_network_interface_info()
config['isilon_num_nodes'] = self.get_num_isilon_nodes(config['isilon_node_info'])
config['storage_num_nodes'] = config['isilon_num_nodes']
if self.have_minimum_isilon_version((8,0)):
config['isilon_onefs_patches'] = json.loads(self.run_isilon_command('isi upgrade patches list --format json')[1])
else:
config['isilon_onefs_pkg_info'] = self.run_isilon_command('isi pkg info')[1]
config['isilon_hdfs_racks'] = json.loads(self.run_isilon_command('isi hdfs racks list --format json')[1])
config['isilon_status'] = self.run_isilon_command('isi status')[1]
config['isilon_job_status'] = self.run_isilon_command('isi job status')[1]
def flush_cache(self):
config = self.config
if config.get('noop', False) and config.get('isilon_flush', False):
self.run_isilon_command('isi_for_array isi_flush')
def run_isilon_command(self, cmd, *args, **kwargs):
config = self.config
assert 'isilon_user' in config
assert 'isilon_host' in config
if config.get('noop', False):
            logging.info('# ssh %s' % cmd)
            return 0, '', ''
else:
return ssh(config['isilon_user'], config['isilon_host'], cmd, stderr_to_stdout=False, *args, **kwargs)
def get_isilon_node_info(self, force=False):
config = self.config
if force or 'isilon_node_info' not in config:
base_isilon_url = 'https://%s:8080' % config['isilon_host']
url = '%s/platform/1/storagepool/nodepools/%s' % (base_isilon_url, config['isilon_node_pool_name'])
info = requests.get(url, auth=(config['isilon_user'], config['_isilon_password']), verify=False).json()
config['isilon_node_info'] = info
return config['isilon_node_info']
def get_isilon_access_zone_info(self):
config = self.config
base_isilon_url = 'https://%s:8080' % config['isilon_host']
url = '%s/platform/1/zones' % base_isilon_url
info = requests.get(url, auth=(config['isilon_user'], config['_isilon_password']), verify=False).json()
        # print(json.dumps(info, sort_keys=True, indent=4, ensure_ascii=False))
config['isilon_access_zone_info'] = info
return info
def get_isilon_network_interface_info(self):
config = self.config
if self.have_minimum_isilon_version((8,0)):
cmd = 'isi network interfaces list --verbose --show-inactive'
else:
cmd = 'isi networks list interfaces --verbose --wide --show-inactive'
exit_code, output, errors = self.run_isilon_command(cmd, print_output=False)
if exit_code != 0:
raise Exception('Unable to get Isilon network interface info.')
config['isilon_network_interface_info'] = output
return output
def get_isilon_node_ids(self, isilon_node_info):
return sorted(isilon_node_info['nodepools'][0]['lnns'])
def get_num_isilon_nodes(self, isilon_node_info):
count = len(self.get_isilon_node_ids(isilon_node_info))
logging.info('get_num_isilon_nodes: count=%d' % count)
return count
def get_isilon_version(self):
config = self.config
if 'isilon_onefs_version' not in config:
config['isilon_onefs_version'] = self.run_isilon_command('isi version')[1].replace('\n','')
version_tuple = self.get_isilon_version_tuple(config['isilon_onefs_version'])
logging.debug('Isilon version tuple=%s' % str(version_tuple))
return config['isilon_onefs_version']
def get_isilon_version_tuple(self, isi_version_output):
def try_int(x):
try:
return int(x)
except:
return x
s = regex_first_group('.*Isilon OneFS v(.*?) ', isi_version_output)
return tuple(try_int(d) for d in s.split('.'))
def have_minimum_isilon_version(self, version_tuple):
return self.get_isilon_version_tuple(self.get_isilon_version()) >= version_tuple
```
#### File: p3_test_driver/plugins/p3_test_dummy.py
```python
import time
from p3_test_driver import p3_plugin_manager
from p3_test_driver.p3_test import BaseTest
class PluginInfo(p3_plugin_manager.IP3Plugin):
def get_plugin_info(self):
return [
{
'class_type': 'test',
'class_name': 'dummytest',
'class': DummyTest,
},
]
class DummyTest(BaseTest):
def __init__(self, test_config):
default_configs = {
'all': {'a': 'from all', 'b': 'from all', 'd': 'from all'},
'dummytest': {'a': 'from dummytest', 'c': 'from dummytest'},
}
super(DummyTest, self).__init__(test_config, default_configs)
def run_test(self):
config = self.test_config
print('DummyTest.run_test')
config['_status_node'].set_status('this is DummyTest.run_test')
time.sleep(config.get('sleep_sec',5))
```
#### File: p3_test_driver/plugins/p3_test_simple.py
```python
from __future__ import division
import datetime
import json
import logging
import os
import re
# P3 Libraries
from p3_test_driver import p3_plugin_manager
from p3_test_driver.p3_test import TimeoutException, StorageTest
from p3_test_driver.p3_util import record_result
from p3_test_driver.system_command import system_command, time_duration_to_seconds
_default_configs = {
'simple': {
'print_output': True,
},
}
class PluginInfo(p3_plugin_manager.IP3Plugin):
def get_plugin_info(self):
return [
{
'class_type': 'test',
'class_name': 'simple',
'class': SimpleTest,
},
]
class SimpleTest(StorageTest):
"""P3 generic test class that can be used to run tests consisting of a single command."""
def __init__(self, test_config, default_configs=_default_configs):
super(SimpleTest, self).__init__(test_config, default_configs=default_configs)
def configure_environment(self):
self.configure_storage()
def run_test(self):
rec = self.test_config
self.configure_environment()
# Build environment for commands.
env = None
command_env = rec.get('command_env')
if command_env:
env = dict(os.environ)
env.update(command_env)
# Run pre-commands.
for pre_command in rec.get('pre_commands', []):
pre_command_template = pre_command['command_template']
if isinstance(pre_command_template, list):
cmd = [x % rec for x in pre_command_template]
else:
cmd = pre_command_template % rec
return_code, output, errors = system_command(cmd,
print_command=True,
print_output=True,
timeout=rec.get('command_timeout_sec',None),
raise_on_error=True,
shell=not isinstance(cmd, list),
noop=False,
env=env)
if 'key' in pre_command:
rec[pre_command['key']] = output.rstrip()
# Build command from command template.
if 'command' not in rec and 'command_template' in rec:
if isinstance(rec['command_template'], list):
rec['command'] = [x % rec for x in rec['command_template']]
else:
rec['command'] = rec['command_template'] % rec
cmd = rec['command']
if 'command_shell' not in rec:
rec['command_shell'] = not isinstance(cmd, list)
rec['_status_node'].set_status('Running command: %s' % str(cmd))
with self.metrics_collector_context():
self.start_metrics()
t0 = datetime.datetime.utcnow()
return_code, output, errors = system_command(cmd,
print_command=True,
print_output=rec['print_output'],
timeout=rec.get('command_timeout_sec',None),
raise_on_error=False,
shell=rec['command_shell'],
noop=rec['noop'],
env=env)
t1 = datetime.datetime.utcnow()
td = t1 - t0
logging.info('exit_code=%d' % return_code)
# Parse any output matching a regex pattern in the json_regex list.
for json_regex in rec.get('json_regex', []):
m = re.search(json_regex, output, flags=re.MULTILINE)
if m:
json_str = m.groups()[0]
d = json.loads(json_str)
rec.update(d)
rec['utc_begin'] = t0.isoformat()
rec['utc_end'] = t1.isoformat()
rec['elapsed_sec'] = time_duration_to_seconds(td)
rec['error'] = (return_code != 0)
rec['exit_code'] = return_code
rec['command_timed_out'] = (return_code == -1)
rec['output'] = output
rec['errors'] = errors
rec['run_as_test'] = rec['test']
if 'record_as_test' in rec:
rec['test'] = rec['record_as_test']
if 'result_filename' in rec:
record_result(rec, rec['result_filename'])
if rec['command_timed_out']:
raise TimeoutException()
if rec['error']:
raise Exception('Command failed')
```
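SimpleTest resolves its `command` by expanding a `%`-style `command_template` against the whole test record. A minimal standalone sketch of that expansion (the `data_dir` key and the `wc` command are hypothetical placeholders, not part of p3_test_driver):
```python
# Illustration only: mirrors the command_template expansion in SimpleTest.run_test.
rec = {
    'test': 'simple',
    'data_dir': '/tmp/data',                              # hypothetical key
    'command_template': 'wc -l %(data_dir)s/input.txt',   # hypothetical command
}
if 'command' not in rec and 'command_template' in rec:
    if isinstance(rec['command_template'], list):
        rec['command'] = [x % rec for x in rec['command_template']]
    else:
        rec['command'] = rec['command_template'] % rec
print(rec['command'])  # wc -l /tmp/data/input.txt
```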
#### File: tests/perf-pravega-tests/testgen_pravega_RH.py
```python
from __future__ import print_function
import json
import sys
# Generates 44 tests and takes ~185m (duration: 2min)
def add_test():
driver = {
'name': 'Pravega',
'driverClass': 'io.openmessaging.benchmark.driver.pravega.PravegaBenchmarkDriver',
'client': {
'controllerURI': 'tcp://pravega-pravega-controller:9090',
'scopeName': 'p3tests',
},
'writer': {
'enableConnectionPooling': False,
},
'enableTransaction': False,
'includeTimestampInEvent': True
}
workload = {
'messageSize': messageSize,
'topics': topics,
'partitionsPerTopic': partitionsPerTopic,
'subscriptionsPerTopic': subscriptionsPerTopic,
'consumerPerSubscription': consumerPerSubscription,
'producersPerTopic': producersPerTopic,
'producerRate': producerRateEventsPerSec,
'consumerBacklogSizeGB': consumerBacklogSizeGB,
'testDurationMinutes': testDurationMinutes,
'keyDistributor': 'NO_KEY',
}
t = dict(
test='openmessaging-benchmark-k8s',
max_test_attempts=1,
driver=driver,
workload=workload,
numWorkers=numWorkers,
localWorker=localWorker,
tarball=tarball,
image=image,
ombHelmPath=ombHelmPath,
namespace=namespace,
build=build,
undeploy=True,
)
test_list.append(t)
test_list = []
localWorker = False
namespace = 'default'
ombHelmPath = '../deployment/kubernetes/helm/pulsar-benchmark'
image = 'devops-repo.isus.emc.com:8116/maria/omb:master-0.8.0-plots'
tarball = '../package/target/openmessaging-benchmark-0.0.1-SNAPSHOT-bin.tar.gz'
build = False
# Message size 10k 16 partitionsPerTopic 16 tests
# for repeat in range(1):
# for producerWorkers in [2]:
# numWorkers = 0 if localWorker else producerWorkers*2
# for testDurationMinutes in [2]:
# for messageSize in [10000]:
# for producerRateEventsPerSec in [1e2, 5e2, 1e3, 1e4, 6e3, 3e3, 5e3, 9e3, 15e3, 3e4, 25e3, 35e3, 2e4]:
# for topics in [4]:
# for partitionsPerTopic in [1]:
# for producersPerWorker in [2]:
# producersPerTopic = int(producersPerWorker * producerWorkers)
# for consumerBacklogSizeGB in [0]:
# for subscriptionsPerTopic in [1]:
# for consumerPerSubscription in [producersPerTopic]:
# add_test()
#
# # Message size 10k 1 partitionsPerTopic 16 tests
# for repeat in range(1):
# for producerWorkers in [2]:
# numWorkers = 0 if localWorker else producerWorkers*2
# for testDurationMinutes in [2]:
# for messageSize in [10000]:
# for producerRateEventsPerSec in [1e2, 5e2, 1e3, 1e4, 6e3, 3e3, 5e3, 9e3, 15e3, 3e4, 25e3, 35e3, 2e4]:
# for topics in [4]:
# for partitionsPerTopic in [16]:
# for producersPerWorker in [2]:
# producersPerTopic = int(producersPerWorker * producerWorkers)
# for consumerBacklogSizeGB in [0]:
# for subscriptionsPerTopic in [1]:
# for consumerPerSubscription in [producersPerTopic]:
# add_test()
# Message size 100 B 16 partitionsPerTopic 9 tests
# for repeat in range(1):
# for producerWorkers in [2]:
# numWorkers = 0 if localWorker else producerWorkers*2
# for testDurationMinutes in [2]:
# for messageSize in [100]:
# for producerRateEventsPerSec in [1e6, 1e2, 9e5, 5e2, 5e5, 1e3, 6e5, 1e4, 6e4, 5e3, 5e4, 6e3, 7e3, 71e2, 73e2, 74e2, 75e2, 8e3, 9e3, 11e3, 12e3, 13e3, 15e3]:
# for topics in [4]:
# for partitionsPerTopic in [1]:
# for producersPerWorker in [2]:
# producersPerTopic = int(producersPerWorker * producerWorkers)
# for consumerBacklogSizeGB in [0]:
# for subscriptionsPerTopic in [1]:
# for consumerPerSubscription in [producersPerTopic]:
# add_test()
# Message size 100 B 1 partitionsPerTopic 9 tests
for repeat in range(1):
for producerWorkers in [2]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [2]:
for messageSize in [100]:
for producerRateEventsPerSec in [4e4, 1e6, 3e6, 4e6, 45e5]:
for topics in [1]:
for partitionsPerTopic in [16]:
for producersPerWorker in [1]:
producersPerTopic = int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [producersPerTopic]:
add_test()
'''
# Message size 100 B 16 low rate tests
for repeat in range(1):
for producerWorkers in [2]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [2]:
for messageSize in [100]:
for producerRateEventsPerSec in [1e2, 5e2, 1e3, 5e3, 6e3, 7e3, 71e2, 73e2, 74e2, 75e2, 8e3, 9e3]: #[1e2, 5e2, 1e3, 5e3, 6e3, 7e3, 8e3, 9e3, 1e4, 11e3, 12e3, 13e3, 15e3]:
for topics in [4]:
for partitionsPerTopic in [16]:
for producersPerWorker in [2]:
producersPerTopic = int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [producersPerTopic]:
add_test()
'''
print(json.dumps(test_list, sort_keys=True, indent=4, ensure_ascii=False))
print('Number of tests generated: %d' % len(test_list), file=sys.stderr)
```
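The testgen scripts print the generated test list as JSON on stdout and the test count on stderr. A hedged sketch of one assumed way to capture and inspect that output; the exact p3_test_driver invocation that consumes the file is not shown here:
```python
# Assumed workflow sketch: run the generator (assumed to be in the current
# directory) and parse its stdout. Requires Python 3.7+ for capture_output.
import json
import subprocess

out = subprocess.run(
    ['python', 'testgen_pravega_RH.py'],
    capture_output=True, text=True, check=True,
).stdout
tests = json.loads(out)
print('loaded %d tests' % len(tests))
print(json.dumps(tests[0]['workload'], indent=2))
```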
#### File: p3_test_driver/tests/testgen_kafka_ssh.py
```python
from __future__ import print_function
import json
import sys
def add_test():
driver = {
'name': 'Kafka',
'driverClass': 'io.openmessaging.benchmark.driver.kafka.KafkaBenchmarkDriver',
'replicationFactor': 3,
'commonConfig':
"bootstrap.servers=127.0.0.1",
'topicConfig':
"min.insync.replicas=3\n"
# This is for syncing to disk
# "flush.messages=1\n"
# "flush.ms=0\n"
,
'producerConfig':
"acks=all\n"
"linger.ms=1\n"
"batch.size=131072\n"
# This is for transaction
# "enableTransaction=True\n"
# "eventPerTransaction=100\n"
# "enable.idempotence=true"
,
'consumerConfig':
"auto.offset.reset=earliest\n"
"enable.auto.commit=false",
}
workload = {
'messageSize': messageSize,
'topics': topics,
'partitionsPerTopic': partitionsPerTopic,
'subscriptionsPerTopic': subscriptionsPerTopic,
'consumerPerSubscription': consumerPerSubscription,
'producersPerTopic': producersPerTopic,
'producerRate': producerRateEventsPerSec,
'consumerBacklogSizeGB': consumerBacklogSizeGB,
'testDurationMinutes': testDurationMinutes,
'keyDistributor': 'NO_KEY',
}
t = dict(
test='openmessaging-benchmark',
max_test_attempts=1,
driver=driver,
workload=workload,
numWorkers=numWorkers,
localWorker=localWorker,
tarball=tarball,
build=build,
undeploy=True,
)
test_list.append(t)
test_list = []
localWorker = False
tarball = 'package/target/openmessaging-benchmark-0.0.1-SNAPSHOT-bin.tar.gz'
build = False
# Message size 100 B
for repeat in range(1):
for producerWorkers in [1]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [5]:
for messageSize in [100]:
for producerRateEventsPerSec in [1e2, 1e3, 5e3, 1e4, 5e4, 1e5, 5e5, -1]:
for topics in [1]:
for partitionsPerTopic in [1]:
for producersPerWorker in [1]:
producersPerTopic = int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [partitionsPerTopic]:
for includeTimestampInEvent in [True]:
add_test()
# Message size 10 KB
for repeat in range(1):
for producerWorkers in [1]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [5]:
for messageSize in [10000]:
for producerRateEventsPerSec in [500, 1000, 3000, 6000, 9000, 12000, 15000, -1]:
for topics in [1]:
for partitionsPerTopic in [1]:
for producersPerWorker in [1]:
producersPerTopic = int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [partitionsPerTopic]:
for includeTimestampInEvent in [True]:
add_test()
#
# Message size 100 KB
for repeat in range(1):
for producerWorkers in [1]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [5]:
for messageSize in [100000]:
for producerRateEventsPerSec in [10, 50, 100, 300, 600, 900, 1200, -1]:
# for producerRateEventsPerSec in [1300, 1500, 1700]:
for topics in [1]:
for partitionsPerTopic in [1]:
for producersPerWorker in [1]:
producersPerTopic = int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [partitionsPerTopic]:
for includeTimestampInEvent in [True]:
add_test()
# # #
# # # # Message size 1 MB
for repeat in range(1):
for producerWorkers in [1]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [5]:
for messageSize in [1000000]:
for producerRateEventsPerSec in [1, 10, 30, 50, 70, 90, 110, 130, -1]:
# for producerRateEventsPerSec in [1]:
for topics in [1]:
for partitionsPerTopic in [1]:
for producersPerWorker in [1]:
producersPerTopic = int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [partitionsPerTopic]:
for includeTimestampInEvent in [True]:
add_test()
# # Message size 100 B, 50,000 events/sec
# for repeat in range(1):
# for producerWorkers in [1]:
# numWorkers = 0 if localWorker else producerWorkers*2
# for testDurationMinutes in [5]:
# for messageSize in [100]:
# for producerRateEventsPerSec in [5e4]:
# for topics in [1]:
# for partitionsPerTopic in [16, 6, 1]:
# for producersPerWorker in [1]:
# producersPerTopic = int(producersPerWorker * producerWorkers)
# for consumerBacklogSizeGB in [0]:
# for subscriptionsPerTopic in [1]:
# for consumerPerSubscription in [partitionsPerTopic]:
# for includeTimestampInEvent in [True]:
# add_test()
print(json.dumps(test_list, sort_keys=True, indent=4, ensure_ascii=False))
print('Number of tests generated: %d' % len(test_list), file=sys.stderr)
```
#### File: p3_test_driver/tests/testgen_pravega_ssh.py
```python
from __future__ import print_function
import json
import sys
# Generates 33 tests with duration - 1min = 72m total run time
def add_test():
driver = {
'name': 'Pravega',
'driverClass': 'io.openmessaging.benchmark.driver.pravega.PravegaBenchmarkDriver',
'client': {
'controllerURI': 'tcp://localhost:9090',
'scopeName': 'examples',
},
'writer': {
'enableConnectionPooling': False,
'enableTransaction': False,
'eventPerTransaction': 1,
},
'includeTimestampInEvent': includeTimestampInEvent,
}
workload = {
'messageSize': messageSize,
'topics': topics,
'partitionsPerTopic': partitionsPerTopic,
'subscriptionsPerTopic': subscriptionsPerTopic,
'consumerPerSubscription': consumerPerSubscription,
'producersPerTopic': producersPerTopic,
'producerRate': producerRateEventsPerSec,
'consumerBacklogSizeGB': consumerBacklogSizeGB,
'testDurationMinutes': testDurationMinutes,
'keyDistributor': 'NO_KEY',
}
t = dict(
test='openmessaging-benchmark',
max_test_attempts=1,
driver=driver,
workload=workload,
numWorkers=numWorkers,
localWorker=localWorker,
tarball=tarball,
build=build,
undeploy=True,
)
test_list.append(t)
test_list = []
localWorker = False
tarball = '../package/target/openmessaging-benchmark-0.0.1-SNAPSHOT-bin.tar.gz'
build = False
# Message size 100 B
for repeat in range(1):
for producerWorkers in [2]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [1]:
for messageSize in [100]:
for producerRateEventsPerSec in [3e1, 1e2, 3e2, 1e3, 3e3, 1e4, 3e4, 1e5, 3e5, -1]:
for topics in [1]:
for partitionsPerTopic in [16]:
for producersPerWorker in [16]:
producersPerTopic = int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [partitionsPerTopic]:
for includeTimestampInEvent in [True]:
add_test()
# Message size 10 KB
for repeat in range(1):
for producerWorkers in [2]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [1]:
for messageSize in [10000]:
for producerRateEventsPerSec in [3e1, 1e2, 3e2, 1e3, 3e3, 1e4, -1]:
for topics in [1]:
for partitionsPerTopic in [16]:
for producersPerWorker in [2]:
producersPerTopic = int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [partitionsPerTopic]:
for includeTimestampInEvent in [True]:
add_test()
# Message size 1 MB
for repeat in range(1):
for producerWorkers in [2]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [1]:
for messageSize in [1000*1000]:
for producerRateEventsPerSec in [1e0, 3e0, 1e1, 3e1, 1e2, 3e2, -1]:
for topics in [1]:
for partitionsPerTopic in [16]:
for producersPerWorker in [2]:
producersPerTopic = int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [partitionsPerTopic]:
for includeTimestampInEvent in [True]:
add_test()
# Message size 100 B, 50,000 events/sec
for repeat in range(1):
for producerWorkers in [2]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [1]:
for messageSize in [100]:
for producerRateEventsPerSec in [5e4]:
for topics in [1]:
for partitionsPerTopic in [16, 6, 1]:
for producersPerWorker in [16, 6, 1]:
producersPerTopic = int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [partitionsPerTopic]:
for includeTimestampInEvent in [True]:
add_test()
print(json.dumps(test_list, sort_keys=True, indent=4, ensure_ascii=False))
print('Number of tests generated: %d' % len(test_list), file=sys.stderr)
``` |
{
"source": "JingerLi/Blender-Collada",
"score": 2
} |
#### File: Blender-Collada/io_scene_dae/collada_exporter.py
```python
import bpy
import numpy
import mathutils
from enum import Enum
from mathutils import Matrix, Quaternion, Vector
import xml.etree.ElementTree as ET
import os
os.system('cls')
mesh_targets = {}
controller_targets = {}
images = {}
class SourceType(Enum):
Name_array = 0
float_array = 1
class DataType(Enum):
string = 0
float = 1
float4x4 = 2
class Param:
name = ''
type = DataType.string
def __init__(self, n, t):
self.name = n
self.type = t
class AnimeChs:
def __init__(self):
self.locChs = []
self.quatChs = []
self.scaleChs = []
def addInputBlock(domNode, semantic, source, offset=None):
input = ET.SubElement(domNode, 'input')
input.set('semantic', semantic)
input.set('source', source)
if(offset != None):
input.set('offset', str(offset))
def buildSource(domNode, strdata, count, id, params, sourceType=SourceType.float_array):
sourceNode = ET.SubElement(domNode, 'source')
sourceNode.set('id', id)
data = ET.SubElement(sourceNode, sourceType.name)
data.set('id', id + '.data')
data.set('count', str(count))
data.text = strdata
techcom = ET.SubElement(sourceNode, 'technique_common')
accessor = ET.SubElement(techcom, 'accessor')
accessor.set('source', '#' + id + '.data')
stride = 0
for p in params:
t = p.type
param = ET.SubElement(accessor, 'param')
param.set('name', p.name)
param.set('type', t.name)
if( t == DataType.string or t == DataType.float):
stride += 1
elif ( t == DataType.float4x4 ):
stride += 16
if(stride != 0):
accessor.set('count', str(int(count/stride)))
accessor.set('stride', str(stride))
def matrixToStrList(mat, transpose):
if(transpose):
mat.transpose()
vals = numpy.asarray(mat).ravel()
matText = ' '.join( "{:.4f}".format(x) for x in vals )
return matText
def loadBonesTree( root, domNode, namebase ):
boneStack = []
domStack = []
boneStack.append(root)
domStack.append(domNode)
while len(boneStack) != 0:
cb = boneStack.pop()
dom = domStack.pop()
name = cb.name
dom.set('id', namebase + '.' + name)
dom.set('sid', name)
dom.set('type', 'JOINT')
matrix = ET.SubElement(dom, 'matrix')
matrix.set('sid', 'LOCALBINDING')
matrixInv = ET.SubElement(dom, 'matrix')
matrixInv.set('sid', 'INVBINDING')
parentMatInv = Matrix.Identity(4)
if(cb.parent != None):
parentMatInv = cb.parent.matrix_local.copy()
parentMatInv.invert()
localMat = cb.matrix_local.copy();
mat = parentMatInv * localMat
localMat.invert()
matrix.text = matrixToStrList(mat, True)
matrixInv.text = matrixToStrList(localMat, True)
for c in cb.children:
dc = ET.SubElement(dom, 'node')
boneStack.append(c)
domStack.append(dc)
def loadNodeArmature(obj, domNode):
armature = obj.data
matText = matrixToStrList(obj.matrix_world.copy(), True)
matNode = ET.SubElement(domNode, 'matrix')
matNode.text = matText
roots = []
bones = armature.bones
for b in bones:
if(b.parent == None):
roots.append(b)
for r in roots:
boneRoot = ET.SubElement(domNode, 'node')
loadBonesTree(r, boneRoot, obj.name)
def loadNodeMesh(obj, domNode ):
matText = matrixToStrList(obj.matrix_world.copy(), True)
matNode = ET.SubElement(domNode, 'matrix')
matNode.text = matText
mesh = obj.data
mesh_targets[mesh.name] = mesh
instGeo = ET.SubElement(domNode, 'instance_geometry')
instGeo.set('url', '#' + mesh.name)
for m in obj.modifiers:
id = m.name + '.' + obj.name + '.skin'
instCtrl = ET.SubElement(domNode, 'instance_controller')
instCtrl.set('url', '#' + id)
ctrlMeta = { 'object': obj, 'mesh': mesh, 'modifier': m}
controller_targets[id] = ctrlMeta
def loadLibControllers( lib_controllers ):
for c in controller_targets:
meta = controller_targets[c]
obj = meta['object']
mesh = meta['mesh']
modifier = meta['modifier'].object
vGroups = obj.vertex_groups
sourceName_0 = c + '.groups'
vertGroups = []
for vg in vGroups:
vertGroups.append(vg.name)
bonesNameList = ' '.join( n for n in vertGroups)
weightDictionary = {}
weights = []
vcount = []
v = []
vertices = mesh.vertices
for vert in vertices:
vcount.append(len(vert.groups))
for g in vert.groups:
if( g.weight not in weightDictionary ):
weightDictionary[g.weight] = len(weights)
weights.append(g.weight)
weightIndex = weightDictionary[g.weight]
v.append(g.group)
v.append(weightIndex)
sourceName_2 = c + '.skin.weights'
weightsStr = ' '.join( "{:.4f}".format(w) for w in weights)
ctrl = ET.SubElement(lib_controllers, 'controller')
ctrl.set('id', c)
ctrl.set('name', modifier.name)
skin = ET.SubElement(ctrl, 'skin')
skin.set('source', '#' + mesh.name)
bsmat = ET.SubElement(skin, 'bind_shape_matrix')
bsmat.text = matrixToStrList(obj.matrix_local.copy(), True)
buildSource(skin, bonesNameList, len(vGroups), sourceName_0, [ Param('GROUPS',DataType.string) ], SourceType.Name_array)
buildSource(skin, weightsStr, len(weights), sourceName_2, [Param('WEIGHT',DataType.float)], SourceType.float_array)
vertexWeightDom = ET.SubElement(skin, 'vertex_weights')
vertexWeightDom.set('count', str(len(vcount)))
addInputBlock(vertexWeightDom, 'GROUPS', '#' + sourceName_0, 0)
addInputBlock(vertexWeightDom, 'WEIGHT', '#' + sourceName_2, 1)
vcountDom = ET.SubElement(vertexWeightDom, 'vcount')
vcountDom.text = ' '.join(str(val) for val in vcount )
vDom = ET.SubElement(vertexWeightDom, 'v')
vDom.text = ' '.join(str(val) for val in v )
def loadLibGeometries( lib_geometries ):
for g in mesh_targets:
mesh = mesh_targets[g]
vertices = mesh.vertices
vertPosStrs = []
for v in vertices:
vertPosStrs.append(' '.join( "{:.4f}".format(val) for val in v.co ))
sourceNamePos = g + '.vertex.position'
vertStrData = ' '.join( str for str in vertPosStrs)
loops = mesh.loops
uvSet = 0
allUVCoordsName = []
allUVCoords = []
uvLayers = mesh.uv_layers
for uvLayer in uvLayers:
uvData = uvLayer.data
uvCoords = ['0.0 0.0'] * len(vertices)
for li in range(len(loops)):
vi = loops[li].vertex_index
uvCoords[vi] = ' '.join( "{:.4f}".format(val) for val in uvData[li].uv )
allUVCoordsName.append( g + '.uvlayer' + str(uvSet))
allUVCoords.append(uvCoords)
uvSet+=1
polygons = mesh.polygons
triangles = []
triangleNormals = []
for p in polygons:
nal = numpy.asarray(p.normal)
ni = len(triangleNormals)
triangleNormals.append(' '.join( "{:.4f}".format(val) for val in nal))
s = p.loop_start
if(p.loop_total == 3):
triangles.append( loops[s+0].vertex_index)
triangles.append(ni)
triangles.append( loops[s+1].vertex_index)
triangles.append(ni)
triangles.append( loops[s+2].vertex_index)
triangles.append(ni)
elif(p.loop_total == 4):
triangles.append( loops[s+0].vertex_index)
triangles.append(ni)
triangles.append( loops[s+1].vertex_index)
triangles.append(ni)
triangles.append( loops[s+2].vertex_index)
triangles.append(ni)
triangles.append( loops[s+0].vertex_index)
triangles.append(ni)
triangles.append( loops[s+2].vertex_index)
triangles.append(ni)
triangles.append( loops[s+3].vertex_index)
triangles.append(ni)
else:
print('Polygon has to be triangles or quads...')
sourceTriNormals = g + '.triangle.normals'
sourceTriNormalsData = ' '.join( str for str in triangleNormals)
geometry = ET.SubElement(lib_geometries, 'geometry')
geometry.set('id', g)
meshDom = ET.SubElement(geometry, 'mesh')
buildSource(meshDom, vertStrData, len(vertices) * 3, sourceNamePos,
[ Param('x',DataType.float), Param('y',DataType.float), Param('z',DataType.float) ], SourceType.float_array)
for i in range(len(allUVCoords)):
uvCoord = allUVCoords[i]
datum = ' '.join( str for str in uvCoord )
buildSource(meshDom, datum, len(allUVCoords[i]) * 2, allUVCoordsName[i],
[ Param('u',DataType.float), Param('v',DataType.float)], SourceType.float_array)
buildSource(meshDom, sourceTriNormalsData, len(triangleNormals) * 3, sourceTriNormals,
[ Param('x',DataType.float), Param('y',DataType.float), Param('z',DataType.float) ], SourceType.float_array)
verticesDom = ET.SubElement(meshDom, 'vertices')
verticesDomID = g + '.vertices'
verticesDom.set('id', verticesDomID)
vertexPosInput = ET.SubElement(verticesDom, 'input')
vertexPosInput.set('semantic', 'POSITION')
vertexPosInput.set('source', '#' + sourceNamePos)
for i in range(len(allUVCoords)):
vertexTexCoordInput = ET.SubElement(verticesDom, 'input')
vertexTexCoordInput.set('semantic', 'TEXCOORD' + str(i))
vertexTexCoordInput.set('source', '#' + allUVCoordsName[i])
trianglesDom = ET.SubElement(meshDom, 'triangles')
trianglesDom.set('count', str(int(len(triangles)/3)))
triangleInput = ET.SubElement(trianglesDom, 'input')
triangleInput.set('semantic', 'VERTEX')
triangleInput.set('source', '#' + verticesDomID)
triangleInput.set('offset', '0')
triangleInput = ET.SubElement(trianglesDom, 'input')
triangleInput.set('semantic', 'NORMAL')
triangleInput.set('source', '#' + sourceTriNormals)
triangleInput.set('offset', '1')
pData = ' '.join( str(v) for v in triangles)
pDom = ET.SubElement(trianglesDom, 'p')
pDom.text = pData
def loadLibVisualScene( lib_visual_scene ):
objscene = bpy.data.scenes[0]
domScene = ET.SubElement(lib_visual_scene, 'visual_scene')
objs = objscene.objects
for obj in objs:
objName = obj.name
objType = obj.type
domNode = ET.SubElement(domScene, 'node')
domNode.set('id', objName)
domNode.set('obj_type', objType)
domNode.set('type', 'NODE')
if(obj.type == 'MESH'):
loadNodeMesh(obj, domNode)
elif(obj.type == 'ARMATURE'):
loadNodeArmature(obj, domNode)
def buildAnimation( node, strip, armature ):
if(strip == None):
return;
action = strip.action
actionIDRoot = action.id_root
if(actionIDRoot == 'MESH'):
#print('Handle fcurve in MESH mode')
#1. pick up vertices that changes in the clip.
#2. build source, channel, sampler for each such vertex.
fcurves = action.fcurves
print('Build sources and channels for vertices ' + str(len(fcurves)))
print('Removing dead vertex is required.')
elif (actionIDRoot == 'OBJECT'):
channels = action.fcurves
boneTimeSets = {}
boneTimelines = {}
boneAnimes = {}
for ch in channels:
rna = ch.data_path
f0 = rna.find('\"') + 1
f1 = rna.find('\"', f0)
boneName = rna[f0:f1]
locRotScalType = rna.split('.')[-1]
bone = armature.bones[boneName]
if(boneName not in boneTimeSets):
boneTimeSets[boneName] = set()
kfpts = ch.keyframe_points
for kf in kfpts:
boneTimeSets[boneName].add(kf.co[0])
if(boneName not in boneAnimes):
boneAnimes[boneName] = AnimeChs()
boneAnime = boneAnimes[boneName]
if(locRotScalType == 'rotation_quaternion'):
boneAnime.quatChs.append(ch)
elif(locRotScalType == 'location'):
boneAnime.locChs.append(ch)
elif(locRotScalType == 'scale'):
boneAnime.scaleChs.append(ch)
boneFCurves = {}
boneInterpolations = {}
for bn in boneAnimes:
abone = armature.bones[bn]
connect = abone.use_connect
timeline = list( boneTimeSets[bn])
timeline.sort()
boneTimelines[bn] = timeline
boneAnime = boneAnimes[bn]
if(bn not in boneFCurves):
boneFCurves[bn] = []
boneMatStr = boneFCurves[bn]
if(bn not in boneInterpolations):
boneInterpolations[bn] = []
boneInterpolation = boneInterpolations[bn]
for tl in timeline:
location= []
quaternion = []
scale = []
if(not connect):
for ch in boneAnime.locChs:
location.append(ch.evaluate(tl))
for ch in boneAnime.quatChs:
quaternion.append(ch.evaluate(tl))
for ch in boneAnime.scaleChs:
scale.append(ch.evaluate(tl))
matLoc = Matrix.Identity(4) if len(location) != 3 else Matrix.Translation( ( location[0], location[1], location[2]) )
matRot = Matrix.Identity(4) if len(quaternion) != 4 else Quaternion( (quaternion[0], quaternion[1], quaternion[2], quaternion[3]) ).to_matrix().to_4x4()
matScl = Matrix.Identity(4)
if( len(scale) == 3):
matScl[0][0] = scale[0]
matScl[1][1] = scale[1]
matScl[2][2] = scale[2]
mat = matRot * matScl * matLoc
matStrs = matrixToStrList(mat, True)
boneMatStr.append(matStrs)
boneInterpolation.append('LINEAR')
for bn in boneFCurves:
timeline = boneTimelines[bn]
timelineDatumName = bn + '.timeline'
datumTimeline = ' '.join( str(v) for v in timeline)
buildSource(node, datumTimeline, len(timeline), timelineDatumName,
[ Param('TIME',DataType.float) ], SourceType.float_array)
transMats = boneFCurves[bn]
transformName = bn + '.transform'
datumTransform = ' '.join( v for v in transMats )
buildSource(node, datumTransform, len(transMats) * 16, transformName,
[ Param('TRANSFORM',DataType.float4x4) ], SourceType.float_array)
interpolation = boneInterpolations[bn]
interpoName = bn + '.interpolation'
datumInterpo = ' '.join( v for v in interpolation )
buildSource(node, datumInterpo, len(interpolation), interpoName,
[ Param('INTERPOLATION',DataType.string) ], SourceType.Name_array)
samplerID = bn + '.sampler'
sampler = ET.SubElement(node, 'sampler')
sampler.set('id', samplerID)
addInputBlock(sampler, 'INPUT', '#' + timelineDatumName)
addInputBlock(sampler, 'OUTPUT', '#' + transformName)
addInputBlock(sampler, 'INTERPOLATION', '#' + interpoName)
channel = ET.SubElement(node, 'channel')
channel.set('source', '#' + samplerID)
channel.set('target', bn + '/transform')
# DO NOT Support MESH animation yet.
# ONLY support linear matrix interpolation for smaller file size.
def loadLibAnimations(lib_animations):
objscene = bpy.data.scenes[0]
objs = objscene.objects
for obj in objs:
obj.update_from_editmode()
objName = obj.name
objType = obj.type
animData = None
type = None
if(objType == 'ARMATURE'):
animData = obj.animation_data
#elif(objType == 'MESH' and obj.data.animation_data != None ):
# animData = obj.data.animation_data
if(animData != None):
tracks = animData.nla_tracks
for tra in tracks:
traNode = ET.SubElement(lib_animations, 'animation')
traNode.set('id', objName + '.' + tra.name)
strip = tra.strips[0]
buildAnimation(traNode, strip, obj.data)
def prettify( root ):
lvstack = []
elmstack = []
lvstack.append(0)
elmstack.append(root)
while len(elmstack) != 0:
lv = lvstack.pop()
p = elmstack.pop()
if(len(p) != 0 ):
p.text = '\n' + (lv + 1) * '\t'
for c in reversed(p):
c.tail = '\n' + (lv + 1) * '\t'
elmstack.append(c)
lvstack.append(lv + 1)
p[-1].tail = '\n' + lv * '\t'
def export( context, filepath ):
collada = ET.Element('COLLADA')
collada.set('xmlns', 'http://www.collada.org/2005/11/COLLADASchema')
collada.set('version', '1.5.0')
collada.set('xmlns:xsi', 'http://www.w3.org/2001/XMLSchema-instance')
lib_animations = ET.SubElement(collada, 'library_animations')
lib_geometries = ET.SubElement(collada, 'library_geometries')
lib_controllers = ET.SubElement(collada, 'library_controllers')
lib_visual_sence = ET.SubElement(collada, 'library_visual_scenes')
loadLibVisualScene(lib_visual_sence)
loadLibGeometries(lib_geometries)
loadLibControllers(lib_controllers)
loadLibAnimations(lib_animations)
prettify(collada)
tree = ET.ElementTree(collada)
tree.write(filepath, encoding="utf-8", xml_declaration=True)
#### comment this test output part when deploying. ####
#export(bpy.context, r'D://projects//dae_library//assets//dae_dev_mesh.dae')
``` |
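buildSource wraps raw data in a COLLADA `<source>` element: a `<float_array>` or `<Name_array>` plus a `technique_common/accessor` describing its layout. Since the exporter itself only runs inside Blender (it imports `bpy`), the sketch below rebuilds the equivalent structure with plain ElementTree purely to illustrate the output shape:
```python
# Standalone illustration of the <source> layout that buildSource() writes
# for a two-weight float source; the ids are made up for the example.
import xml.etree.ElementTree as ET

skin = ET.Element('skin')
source = ET.SubElement(skin, 'source', id='demo.skin.weights')
arr = ET.SubElement(source, 'float_array', id='demo.skin.weights.data', count='2')
arr.text = '0.2500 0.7500'
tech = ET.SubElement(source, 'technique_common')
acc = ET.SubElement(tech, 'accessor', source='#demo.skin.weights.data', count='2', stride='1')
ET.SubElement(acc, 'param', name='WEIGHT', type='float')
print(ET.tostring(skin, encoding='unicode'))
```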
{
"source": "JingerTea/undetetable_selenium",
"score": 3
} |
#### File: selenium_toolbox/buster_captcha_solver/move_mouse_like_human.py
```python
import numpy as np
import scipy.interpolate as si
# Using a B-spline to simulate human-like mouse movements
def move_mouse_like_human(action, start_element):
points = [[6, 2], [3, 2], [0, 0], [0, 2]]
points = np.array(points)
x = points[:, 0]
y = points[:, 1]
t = range(len(points))
ipl_t = np.linspace(0.0, len(points) - 1, 100)
x_tup = si.splrep(t, x, k=1)
y_tup = si.splrep(t, y, k=1)
x_list = list(x_tup)
xl = x.tolist()
x_list[1] = xl + [0.0, 0.0, 0.0, 0.0]
y_list = list(y_tup)
yl = y.tolist()
y_list[1] = yl + [0.0, 0.0, 0.0, 0.0]
x_i = si.splev(ipl_t, x_list)
y_i = si.splev(ipl_t, y_list)
startElement = start_element
action.move_to_element(startElement)
action.perform()
c = 5
i = 0
for mouse_x, mouse_y in zip(x_i, y_i):
action.move_by_offset(mouse_x, mouse_y)
action.perform()
i += 1
if i == c:
break
``` |
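A hedged usage sketch with Selenium's ActionChains; the import path follows the package layout in the file header, and the page URL and element choice are placeholders:
```python
# Hedged usage sketch: driver setup, URL, and locator are assumptions made
# for illustration only; only move_mouse_like_human comes from this module.
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By

from selenium_toolbox.buster_captcha_solver.move_mouse_like_human import move_mouse_like_human

driver = webdriver.Chrome()
driver.get('https://example.com')
start = driver.find_element(By.TAG_NAME, 'body')
actions = ActionChains(driver)
move_mouse_like_human(actions, start)   # wander toward/around the element
actions.click().perform()
driver.quit()
```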
{
"source": "jingexu/cells-in-gel",
"score": 3
} |
#### File: cells-in-gel/cells_in_gel/preprocess.py
```python
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
from scipy.ndimage import binary_fill_holes as fillholes
from skimage import img_as_ubyte
from skimage.util import img_as_float
from skimage.exposure import adjust_sigmoid
from skimage.filters import threshold_otsu, threshold_triangle, rank, laplace, sobel
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing, square, disk, remove_small_objects, opening, dilation, watershed, erosion
from skimage.color import label2rgb, rgb2gray
from skimage.transform import rescale
import os
from os.path import join
from scipy import ndimage as ndi
from warnings import warn
def frequency_filter(im, mu, sigma, passtype='low'):
'''
This function applies a lowpass or highpass filter to an image.
Parameters
----------
im : (N, M) ndarray
Grayscale input image.
mu : float, optional
Average for input in low pass filter. Default value is 500.
sigma : float, optional
Standard deviation for input in low pass filter. Default value is 70.
passtype: string
Applies a 'high' or 'low' pass filter. Default value is 'low'.
Returns
-------
out : ndarray
Low or high pass filtered output image.
Examples
--------
>>> image = plt.imread('..\C3-NTG-CFbs_NTG5ECM_1mMRGD_20x_003.tif')
>>> lowpass = frequency_filter(im, 500, 70, passtype='low')
'''
# define x and y based on image shape
y_length, x_length = np.shape(im)
xi = np.linspace(0, x_length-1, x_length)
yi = np.linspace(0, y_length-1, y_length)
x, y = np.meshgrid(xi, yi)
# define lowpass or highpass filter
if passtype == 'low':
gfilt = np.exp(-((x-mu)**2 + (y-mu)**2)/(2*sigma**2))
if passtype == 'high':
gfilt = 1 - np.exp(-((x-mu)**2 + (y-mu)**2)/(2*sigma**2))
fim = np.fft.fft2(im) # moving to spacial domain
fim_c = np.fft.fftshift(fim) # centering
fim_filt = np.multiply(fim_c, gfilt) # apply the filter
fim_uc = np.fft.ifftshift(fim_filt) # uncenter
im_pass = np.real(np.fft.ifft2(fim_uc)) # perform inverse transform
return im_pass
def _check_dtype_supported(ar):
'''
Used in remove_large_objects function and taken from
skimage.morphology package.
'''
# Should use `issubdtype` for bool below, but there's a bug in numpy 1.7
if not (ar.dtype == bool or np.issubdtype(ar.dtype, np.integer)):
raise TypeError("Only bool or integer image types are supported. "
"Got %s." % ar.dtype)
def remove_large_objects(ar, max_size=10000, connectivity=1, in_place=False):
'''
Remove connected components larger than the specified size. (Modified from
skimage.morphology.remove_small_objects)
Parameters
----------
ar : ndarray (arbitrary shape, int or bool type)
The array containing the connected components of interest. If the array
type is int, it is assumed that it contains already-labeled objects.
The ints must be non-negative.
max_size : int, optional (default: 10000)
The largest allowable connected component size.
connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
The connectivity defining the neighborhood of a pixel.
in_place : bool, optional (default: False)
If `True`, remove the connected components in the input array itself.
Otherwise, make a copy.
Raises
------
TypeError
If the input array is of an invalid type, such as float or string.
ValueError
If the input array contains negative values.
Returns
-------
out : ndarray, same shape and type as input `ar`
The input array with small connected components removed.
Examples
--------
>>> from skimage import morphology
>>> a = np.array([[0, 0, 0, 1, 0],
... [1, 1, 1, 0, 0],
... [1, 1, 1, 0, 1]], bool)
>>> b = morphology.remove_large_objects(a, 6)
>>> b
array([[False, False, False, False, False],
[ True, True, True, False, False],
[ True, True, True, False, False]], dtype=bool)
>>> c = morphology.remove_small_objects(a, 7, connectivity=2)
>>> c
array([[False, False, False, True, False],
[ True, True, True, False, False],
[ True, True, True, False, False]], dtype=bool)
>>> d = morphology.remove_large_objects(a, 6, in_place=True)
>>> d is a
True
'''
# Raising type error if not int or bool
_check_dtype_supported(ar)
if in_place:
out = ar
else:
out = ar.copy()
if max_size == 0: # shortcut for efficiency
return out
if out.dtype == bool:
selem = ndi.generate_binary_structure(ar.ndim, connectivity)
ccs = np.zeros_like(ar, dtype=np.int32)
ndi.label(ar, selem, output=ccs)
else:
ccs = out
try:
component_sizes = np.bincount(ccs.ravel())
except ValueError:
raise ValueError("Negative value labels are not supported. Try "
"relabeling the input with `scipy.ndimage.label` or "
"`skimage.morphology.label`.")
if len(component_sizes) == 2:
warn("Only one label was provided to `remove_small_objects`. "
"Did you mean to use a boolean array?")
too_large = component_sizes > max_size
too_large_mask = too_large[ccs]
out[too_large_mask] = 0
return out
def phalloidin_labeled(im, selem=disk(3), mu=500, sigma=70, cutoff=0, gain=100,
min_size=250, max_size=10000, connectivity=1):
"""
Signature: phalloidin_labeled(*args)
Docstring: Segment and label image
Extended Summary
----------------
This function applies preprocessing filters (contrast adjustment and a low-pass
filter), then defines the threshold value for the desired image. Thresholding
is calculated with Otsu's method, which creates a binarized image by setting
pixel intensities above that thresh value to white, and the ones below to
black (background). Next, it cleans up the image by filling in random noise
within the cell outlines and removes small background objects. It then
labels adjacent pixels with the same value and defines them as a region.
It returns an RGB image with color-coded labels.
Parameters
----------
im : (N, M) ndarray
Grayscale input image.
selem : numpy.ndarray, optional
Area used for separating cells. Default value is
skimage.morphology.disk(3).
cutoff : float, optional
Cutoff of the sigmoid function that shifts the characteristic curve
in horizontal direction. Default value is 0.
gain : float, optional
The constant multiplier in exponential's power of sigmoid function.
Default value is 100.
mu : float, optional
Average for input in low pass filter. Default value is 500.
sigma : float, optional
Standard deviation for input in low pass filter. Default value is 70.
min_size : int, optional
The smallest allowable object size. Default value is 250.
max_size : int, optional
The largest allowable object size. Default value is 10000.
connectivity : int, optional
The connectivity defining the neighborhood of a pixel. Default value
is 1.
Returns
-------
out : label_image (ndarray) segmented and object labeled for analysis
Examples
--------
image = plt.imread('C3-NTG-CFbs_NTG5ECM_1mMRGD_20x_003.tif')
label_image = phalloidin_labeled(image, mu=500, sigma=70,
cutoff=0, gain=100)
"""
# contrast adjustment
im_con = adjust_sigmoid(im, cutoff=cutoff, gain=gain, inv=False)
# contrast + low pass filter
im_lo = frequency_filter(im_con, mu, sigma, passtype='low')
# contrast + low pass + binary
thresh = threshold_otsu(im_lo, nbins=256)
im_bin = im_lo > thresh
# fill holes, separate cells, and remove small/large objects
im_fill = ndimage.binary_fill_holes(im_bin)
im_open = opening(im_fill, selem)
im_clean_i = remove_small_objects(im_open, min_size=min_size,
connectivity=connectivity, in_place=False)
im_clean = remove_large_objects(im_clean_i, max_size=max_size,
connectivity=connectivity, in_place=False)
# labelling regions that are cells
label_image = label(im_clean)
# coloring labels over cells
image_label_overlay = label2rgb(label_image, image=im, bg_label=0)
print(image_label_overlay.shape)
# plot overlay image
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(image_label_overlay)
ax.set_axis_off()
plt.tight_layout()
plt.show()
return (label_image)
def SMA_segment(im, mu=500, sigma=70, cutoff=0, gain=100,
min_size=100, connectivity=1):
"""
This function binarizes a smooth muscle actin (SMA) fluorescence microscopy channel
using contrast adjustment, a low-pass filter, Otsu thresholding, and removal
of small objects.
Parameters
----------
im : (N, M) ndarray
Grayscale input image.
cutoff : float, optional
Cutoff of the sigmoid function that shifts the characteristic curve
in horizontal direction. Default value is 0.
gain : float, optional
The constant multiplier in exponential's power of sigmoid function.
Default value is 100.
mu : float, optional
Average for input in low pass filter. Default value is 500.
sigma : float, optional
Standard deviation for input in low pass filter. Default value is 70.
min_size : int, optional
The smallest allowable object size. Default value is 100.
connectivity : int, optional
The connectivity defining the neighborhood of a pixel. Default value
is 1.
Returns
-------
out : label_image (ndarray) segmented and object labeled for analysis,
image_label_overlay (ndarray)
Examples
--------
>>> image = plt.imread('..\C4-NTG-CFbs_NTG5ECM_1mMRGD_20x_003.tif')
>>> label, overlay = SMA_segment(image, mu=500, sigma=70,
cutoff=0, gain=100)
"""
# contrast adjustment
im_con = adjust_sigmoid(im, cutoff=cutoff, gain=gain, inv=False)
# contrast + low pass filter
im_lo = frequency_filter(im_con, mu, sigma, passtype='low')
# contrast + low pass + binary
thresh = threshold_otsu(im_lo, nbins=256)
im_bin = im_lo > thresh
# remove small objects
im_bin_clean = remove_small_objects(im_bin, min_size=min_size,
connectivity=connectivity,
in_place=False)
# labelling regions that are cells
label_image = label(im_bin_clean)
# coloring labels over cells
image_label_overlay = label2rgb(label_image, image=im, bg_label=0)
return label_image, image_label_overlay
def colorize(image, i, x):
"""
Signature: colorize(*args)
Docstring: segment and label image
Extended Summary:
----------------
The colorize function defines the threshold value for the desired image by
the triangle function and then creates a binarized image by setting pixel
intensities above that thresh value to white, and the ones below to black
(background). Next, it closes up the image by filling in random noise
within the cell outlines and smooths/clears out the border. It then labels
adjacent pixels with the same value and defines them as a region. It
returns an RGB image with color-coded labels.
Parameters:
----------
image : 2D array
greyscale image
i : int
dimension of square to be used for binarization
x : float
dimension of image in microns according to imageJ
Returns:
--------
RGB image overlay
int : 2D ndarray
"""
# resizing image
image = rescale(image, x/1024, anti_aliasing=False)
# applying threshold to image
thresh = threshold_triangle(image)
binary = closing(image > thresh, square(i))
binary = ndimage.binary_fill_holes(binary)
# cleaning up boundaries of cells
cleared = clear_border(binary)
# labelling regions that are cells
label_image = label(cleared)
# coloring labels over cells
image_label_overlay = label2rgb(label_image, image=image, bg_label=0)
print(image_label_overlay.shape)
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(image_label_overlay)
ax.set_axis_off()
plt.tight_layout()
plt.show()
return (label_image)
def sharpen_nuclei(image, selem=square(8), ksize=10, alpha=0.2, sigma=40,
imshow=True):
"""
Highlight nuclei in the image.
Make a sharp contrast between nuclei and background to highlight nuclei
in the input image, achieved by mean blurring, Laplace sharpening, and a
Gaussian high-pass filter. The selem, ksize, alpha, and sigma parameters
have default values and can be customized by the user.
Parameters
----------
image : numpy.ndarray
grayscale image in which to enhance the nuclei.
selem : numpy.ndarray
area used for scanning in blurring, defaults to square(8).
ksize : int
ksize used for the Laplace transform, defaults to 10.
alpha : float
coefficient used in Laplace sharpening, defaults to 0.2.
sigma : int
power coefficient in the Gaussian filter, defaults to 40.
imshow : bool
whether to show the processed images, defaults to True.
Returns
----------
Two processed grayscale images (2-dimensional arrays) with sharpened nuclei,
produced using two different sharpening styles.
"""
image = img_as_ubyte(image)
def custom(image):
imin = np.min(image)
imax = np.max(image)
full = imax - imin
new = (image - imin)/full
return new
im = custom(image)
print(im.shape)
threshold2 = np.mean(im) + 3*np.std(im)
print(threshold2)
im1 = im > threshold2
im2 = rank.mean(im1, selem)
im21 = custom(im2)
threshold3 = np.mean(im21) + np.std(im21)
print(threshold3)
im3 = im > threshold3
im5 = laplace(im2, ksize=ksize)
im4 = im2 + alpha*im5
threshold4 = np.mean(im4) + np.std(im4)
im4 = im4 > threshold4
xi = np.linspace(0, (im.shape[1]-1), im.shape[1])
yi = np.linspace(0, (im.shape[0]-1), im.shape[0])
x, y = np.meshgrid(xi, yi)
sigma = sigma
mi = im.shape[1]/2
ni = im.shape[0]/2
gfilt = np.exp(-((x-mi)**2+(y-ni)**2)/(2*sigma**2))
fim = np.fft.fft2(im1)
fim2 = np.fft.fftshift(fim)
fim3 = np.multiply(fim2, gfilt)
fim4 = np.fft.ifftshift(fim3)
im6 = np.real(np.fft.ifft2(fim4))
im7 = custom(im6)
threshold6 = np.mean(im7)+0.2*np.std(im7)
print(threshold6)
im7 = im6 > threshold6
f1 = im4*1
f2 = im7*1
if imshow == True:
fig, ax = plt.subplots(1, 3, figsize=(18, 10))
ax[0].imshow(image)
ax[1].imshow(f1, cmap='gray')
ax[2].imshow(f2, cmap='gray')
ax[0].set_title('original image', fontsize=25)
ax[1].set_title('Blur and Laplace', fontsize=25)
ax[2].set_title('Gaussian Filter', fontsize=25)
for i in [0, 1, 2]:
ax[i].axis('off')
else:
pass
return [f1, f2]
def enhance_nucleis(image, open_selem=disk(5), image_display=True):
"""
Highlight nuclei in the image.
Make a sharp contrast between nuclei and background to highlight nuclei
in the input image, achieved by opening, dilation, Sobel, watershed, and thresholding.
The structuring element has a default value and can be customized by the user.
Parameters
----------
image : numpy.ndarray
grayscale image in which to enhance the nuclei.
open_selem : numpy.ndarray
area used for the opening process, defaults to disk(5).
image_display : bool
whether to show the enhanced images, defaults to True.
Returns
----------
A processed grayscale image (2-dimensional array) with enhanced nuclei.
"""
im1 = img_as_ubyte(image)
im_open = opening(im1, open_selem)
elevation_map = img_as_ubyte(dilation(sobel(im_open), disk(4)))
im2 = watershed(elevation_map, im_open)
im22 = (im2 > threshold_otsu(im2))*1
im3 = erosion((fillholes(elevation_map))*1, disk(2))
if image_display == True:
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].imshow(im22, cmap='gray')
ax[1].imshow(im3, cmap='gray')
ax[0].set_title('method1', fontsize=20)
ax[1].set_title('method2', fontsize=20)
ax[0].axis('off')
ax[1].axis('off')
else:
pass
return im22
def list_of_images(image_channel, mypath):
"""
Automatically extract all the images belonging to a channel into a list
Parameters
----------
image_channel : str
the channel from which the user wants to extract images.
mypath : str
path name where images are located.
----------
Returns
----------
Returns a list of file paths for all images belonging to the channel.
"""
#mypath = '/Users/irinakopyeva/documents/Channel_Separated'
namelist = []
tifflist = []
for root, dirs, files in os.walk(mypath):
for name in files:
if name[0:2] == image_channel:
namelist.append(name)
j = os.path.join(root, name)
tifflist.append(j)
return tifflist
``` |
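A hedged end-to-end sketch tying `list_of_images` to `phalloidin_labeled`; the folder path is a placeholder and the `'C3'` prefix mirrors the phalloidin channel naming used in the docstring examples:
```python
# Hedged pipeline sketch: gather every 'C3' TIFF under a folder (path is a
# placeholder) and segment each one with phalloidin_labeled.
import matplotlib.pyplot as plt
from cells_in_gel.preprocess import list_of_images, phalloidin_labeled

tiffs = list_of_images('C3', '/path/to/Channel_Separated')
labels = []
for path in tiffs:
    im = plt.imread(path)
    labels.append(phalloidin_labeled(im, min_size=250, max_size=10000))
print('segmented %d images' % len(labels))
```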
{
"source": "jingfeidu/pytext-1",
"score": 3
} |
#### File: pytext/metric_reporters/metric_reporter.py
```python
from typing import Dict, List
import torch
from pytext.config.component import Component, ComponentType
from pytext.config.pytext_config import ConfigBase
class MetricReporter(Component):
"""
MetricReporter is responsible for three things:
#. Aggregate output from trainer, which includes model inputs, predictions,
targets, scores, and loss.
#. Calculate metrics using the aggregated output, and define how the metric
is used to find best model
#. Optionally report the metrics and aggregated output to various channels
Attributes:
lower_is_better (bool): Whether a lower metric indicates better performance.
Set to True for e.g. perplexity, and False for e.g. accuracy. Default
is False
channels (List[Channel]): A list of Channel that will receive metrics and
the aggregated trainer output then format and report them in any customized
way.
"""
__COMPONENT_TYPE__ = ComponentType.METRIC_REPORTER
lower_is_better: bool = False
class Config(ConfigBase):
output_path: str = "/tmp/test_out.txt"
def __init__(self, channels) -> None:
self._reset()
self.channels = channels
def _reset(self):
self.all_preds: List = []
self.all_targets: List = []
self.all_context: Dict = {}
self.all_loss: List = []
self.all_scores: List = []
self.n_batches = 0
self.batch_size: List = []
def add_batch_stats(
self, n_batches, preds, targets, scores, loss, m_input, **context
):
"""
Aggregates a batch of output data (predictions, scores, targets/true labels
and loss).
Args:
n_batches (int): number of current batch
preds (torch.Tensor): predictions of current batch
targets (torch.Tensor): targets of current batch
scores (torch.Tensor): scores of current batch
loss (double): average loss of current batch
m_input (Tuple[torch.Tensor, ...]): model inputs of current batch
context (Dict[str, Any]): any additional context data, it could be
either a list of data which maps to each example, or a single value
for the batch
"""
self.n_batches = n_batches
self.aggregate_preds(preds)
self.aggregate_targets(targets)
self.aggregate_scores(scores)
for key, val in context.items():
if not (isinstance(val, torch.Tensor) or isinstance(val, List)):
continue
if key not in self.all_context:
self.all_context[key] = []
self.aggregate_data(self.all_context[key], val)
self.all_loss.append(loss)
self.batch_size.append(len(m_input[0]))
def aggregate_preds(self, new_batch):
self.aggregate_data(self.all_preds, new_batch)
def aggregate_targets(self, new_batch):
self.aggregate_data(self.all_targets, new_batch)
def aggregate_scores(self, new_batch):
self.aggregate_data(self.all_scores, new_batch)
@classmethod
def aggregate_data(cls, all_data, new_batch):
"""
Aggregate a batch of data, basically just convert tensors to list of native
python data
"""
if new_batch is None:
return
simple_list = cls._make_simple_list(new_batch)
all_data.extend(simple_list)
@classmethod
def _make_simple_list(cls, data):
if isinstance(data, torch.Tensor):
return data.tolist()
elif isinstance(data, List) and all(
isinstance(elem, torch.Tensor) for elem in data
):
return [elem.tolist() for elem in data]
elif isinstance(data, List):
return data
elif isinstance(data, tuple):
return data[0].tolist()
else:
raise NotImplementedError()
def add_channel(self, channel):
self.channels.append(channel)
def batch_context(self, batch):
return {}
def calculate_loss(self):
"""
Calculate the average loss for all aggregated batch
"""
return sum(self.all_loss) / float(len(self.all_loss))
def calculate_metric(self):
"""
Calculate metrics, each sub class should implement it
"""
raise NotImplementedError()
def gen_extra_context(self):
"""
Generate any extra intermediate context data for metric calculation
"""
pass
# TODO this method can be removed by moving Channel construction to Task
def get_meta(self):
"""
Get global meta data that is not specific to any batch, the data will be
pass along to channels
"""
return {}
def report_metric(self, model, stage, epoch, reset=True, print_to_channels=True):
"""
Calculate metrics and average loss, report all statistic data to channels
Args:
model (nn.Module): the PyTorch neural network model.
stage (Stage): training, evaluation or test
epoch (int): current epoch
reset (bool): if all data should be reset after report, default is True
print_to_channels (bool): if report data to channels, default is True
"""
self.gen_extra_context()
self.total_loss = self.calculate_loss()
metrics = self.calculate_metric()
model_select_metric = self.get_model_select_metric(metrics)
if print_to_channels:
for channel in self.channels:
if stage in channel.stages:
channel.report(
stage,
epoch,
metrics,
model_select_metric,
self.total_loss,
self.all_preds,
self.all_targets,
self.all_scores,
self.all_context,
self.get_meta(),
model,
)
if reset:
self._reset()
return metrics
def get_model_select_metric(self, metrics):
"""
Return a single numeric metric value that is used for model selection, returns
the metric itself by default, but usually metrics will be more complicated
data structures
"""
return metrics
def compare_metric(self, new_metric, old_metric):
"""
Check if new metric indicates better model performance
Returns:
bool, true if model with new_metric performs better
"""
if not old_metric:
return True
new = self.get_model_select_metric(new_metric)
old = self.get_model_select_metric(old_metric)
if new == old:
return False
return (new < old) == self.lower_is_better
``` |
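A minimal subclass sketch (not part of PyText itself) showing the intended flow with the class as defined in this file: aggregate one batch with `add_batch_stats`, then implement `calculate_metric`, here as plain accuracy. The import path follows this file's location and channel wiring is omitted:
```python
# Toy reporter: the model-selection metric is plain accuracy over the
# aggregated predictions. Channels are omitted for brevity.
import torch
from pytext.metric_reporters.metric_reporter import MetricReporter

class AccuracyMetricReporter(MetricReporter):
    def calculate_metric(self):
        correct = sum(int(p == t) for p, t in zip(self.all_preds, self.all_targets))
        return correct / max(len(self.all_preds), 1)

reporter = AccuracyMetricReporter(channels=[])
reporter.add_batch_stats(
    n_batches=1,
    preds=torch.tensor([1, 0, 1]),
    targets=torch.tensor([1, 1, 1]),
    scores=torch.tensor([0.9, 0.4, 0.8]),
    loss=0.35,
    m_input=(torch.zeros(3, 4),),
)
print(reporter.calculate_metric())  # 0.666...
```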
{
"source": "jingfelix/Anonymous-Q-and-A",
"score": 3
} |
#### File: jingfelix/Anonymous-Q-and-A/logrec.py
```python
import time
from os import getcwd
from os.path import join
log_file_path = join(getcwd(), 'data', 'log.txt')
def ipRecorder(ip):
'''
Record the visiting IP address and write it to the log file.
'''
localtime = time.asctime(time.localtime(time.time()))
file = open(log_file_path, 'a+')
file.write(localtime + ' ' + str(ip) + '\n')
file.close()
pass
def errorRecorder(status):
'''
Record the error status code and write it to the log file.
'''
localtime = time.asctime(time.localtime(time.time()))
file = open(log_file_path, 'a')
file.write(localtime + 'ErrorStatus:' + str(status) + '\n')
file.close()
pass
``` |
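A hedged usage sketch calling the recorders from a Flask view; Flask, the route, and the error handler are assumptions for illustration, and the module expects a `data` folder under the current working directory:
```python
# Hedged usage sketch: only ipRecorder/errorRecorder come from logrec.py.
from flask import Flask, request
from logrec import ipRecorder, errorRecorder

app = Flask(__name__)

@app.route('/')
def index():
    ipRecorder(request.remote_addr)  # appends "<time> <ip>" to data/log.txt
    return 'ok'

@app.errorhandler(404)
def not_found(error):
    errorRecorder(404)               # appends "<time>ErrorStatus:404"
    return 'not found', 404

if __name__ == '__main__':
    app.run()
```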
{
"source": "JingfengRong/editnerf",
"score": 2
} |
#### File: JingfengRong/editnerf/load_blender.py
```python
import os
import json
import torch
import numpy as np
import imageio
import torchvision
def trans_t(t): return torch.Tensor([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, t],
[0, 0, 0, 1]]).float()
def rot_phi(phi): return torch.Tensor([
[1, 0, 0, 0],
[0, np.cos(phi), -np.sin(phi), 0],
[0, np.sin(phi), np.cos(phi), 0],
[0, 0, 0, 1]]).float()
def rot_theta(th): return torch.Tensor([
[np.cos(th), 0, -np.sin(th), 0],
[0, 1, 0, 0],
[np.sin(th), 0, np.cos(th), 0],
[0, 0, 0, 1]]).float()
def pose_spherical(theta, phi, radius):
c2w = trans_t(radius)
c2w = rot_phi(phi / 180. * np.pi) @ c2w
c2w = rot_theta(theta / 180. * np.pi) @ c2w
c2w = torch.Tensor(np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])) @ c2w
return c2w
def load_blender_data(basedir, trainskip=1, testskip=1, skip_val_test=False):
splits = ['train', 'val', 'test']
metas = {}
for s in splits:
trn_fname = os.path.join(basedir, 'transforms_{}.json'.format(s))
with open(trn_fname, 'r') as fp:
metas[s] = json.load(fp)
all_imgs = []
all_poses = []
counts = [0]
for s in splits:
meta = metas[s]
imgs = []
poses = []
if s == 'train':
skip = max(trainskip, 1)
else:
skip = max(testskip, 1)
for frame in meta['frames'][::skip]:
fname = os.path.join(basedir, frame['file_path'] + '.png')
if skip_val_test and s in ('val', 'test'):
# HACK: we don't have images for test/val views, but we'd at least like to see the rendered views
imgs.append(np.zeros(all_imgs[-1][-1].shape))
else:
imgs.append(imageio.imread(fname, ignoregamma=True, pilmode='RGB'))
poses.append(np.array(frame['transform_matrix']))
imgs = (np.array(imgs) / 255.).astype(np.float32)
poses = np.array(poses).astype(np.float32)
counts.append(counts[-1] + imgs.shape[0])
all_imgs.append(imgs)
all_poses.append(poses)
i_split = [np.arange(counts[i], counts[i + 1]) for i in range(3)]
imgs = np.concatenate(all_imgs, 0)
poses = np.concatenate(all_poses, 0)
H, W = imgs[0].shape[:2]
if 'focal' not in meta:
camera_angle_x = float(meta['camera_angle_x'])
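# Pinhole model: tan(fov_x / 2) = (W / 2) / focal, hence focal = 0.5 * W / tan(0.5 * fov_x).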
focal = .5 * W / np.tan(.5 * camera_angle_x)
else:
focal = meta['focal']
return imgs, poses, [H, W, focal], i_split
def load_chairs(basedir, args):
all_imgs = []
all_poses = []
all_i_split = [[], [], []]
all_style_inds = []
ref_imgs = []
hwfs = []
count = 0
if args.real_image_dir:
instance_names = [args.real_image_dir]
else:
with open(os.path.join(basedir, 'instances.txt')) as f:
instances = [x.strip() for x in f.readlines()]
instance_names = [os.path.join(basedir, instance_name) for instance_name in instances]
if args.instance >= 0:
instance_names = [instance_names[args.instance]]
for instance in instance_names:
imgs, poses, hwf, i_split = load_blender_data(instance, args.trainskip, args.testskip, skip_val_test=args.real_image_dir)
hwfs += [hwf for _ in range(imgs.shape[0])]
N_train, N_val, N_test = [len(x) for x in i_split]
train, val, test = imgs[:N_train], imgs[N_train:N_train + N_val], imgs[N_train + N_val:N_train + N_val + N_test]
train_poses, val_poses, test_poses = poses[:N_train], poses[N_train:N_train + N_val], poses[N_train + N_val:N_train + N_val + N_test]
imgs = np.concatenate([train, val, test])
poses = np.concatenate([train_poses, val_poses, test_poses])
for i in range(3):
all_i_split[i].append(count + i_split[i])
all_style_inds.append(torch.zeros((imgs.shape[0])).long() + len(all_imgs))
ref_imgs.append(imgs[0])
all_imgs.append(imgs)
all_poses.append(poses)
if len(all_imgs) >= args.N_instances:
break
count += imgs.shape[0]
for i in range(3):
all_i_split[i] = np.concatenate(all_i_split[i], 0)
imgs = np.concatenate(all_imgs, 0)
poses = np.concatenate(all_poses, 0)
# View examples instances we're training on
todir = os.path.join(args.basedir, args.savedir if args.savedir else args.expname)
os.makedirs(todir, exist_ok=True)
ref_imgs = torch.from_numpy(np.stack(ref_imgs, 0))[:128, :, :, :3].permute(0, 3, 1, 2)
torchvision.utils.save_image(ref_imgs, os.path.join(todir, 'ref.png'))
return imgs, poses, torch.tensor(hwfs), all_i_split, torch.cat(all_style_inds, dim=0)
```
#### File: JingfengRong/editnerf/test_nerf.py
```python
import random
import torch
import os
import numpy as np
from rendering import render_path
from dataset import load_data
from inputs import config_parser
from model import create_nerf
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
np.random.seed(0)
def test():
parser = config_parser()
args = parser.parse_args()
images, poses, style, i_test, i_train, bds_dict, dataset, hwfs, near_fars, _ = load_data(args)
images_test, poses_test, style_test, hwfs_test, nf_test = images[i_test], poses[i_test], style[i_test], hwfs[i_test], near_fars[i_test]
images_train, poses_train, style_train, hwfs_train, nf_train = images[i_train], poses[i_train], style[i_train], hwfs[i_train], near_fars[i_train]
# Create log dir and copy the config file
basedir = args.basedir
expname = args.expname
render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer = create_nerf(args)
np.save(os.path.join(basedir, expname, 'poses.npy'), poses_train.cpu())
np.save(os.path.join(basedir, expname, 'hwfs.npy'), hwfs_train.cpu())
render_kwargs_train.update(bds_dict)
render_kwargs_test.update(bds_dict)
with torch.no_grad():
if args.render_test:
if args.shuffle_poses:
print('Shuffling test poses')
permutation = list(range(len(poses_test)))
random.shuffle(permutation)
poses_test = poses_test[permutation]
testsavedir = os.path.join(basedir, expname, 'test_imgs{:06d}'.format(start))
os.makedirs(testsavedir, exist_ok=True)
_, _, psnr = render_path(poses_test.to(device), style_test, hwfs_test, args.chunk, render_kwargs_test, nfs=nf_test, gt_imgs=images_test, savedir=testsavedir)
print('Saved test set w/ psnr', psnr)
if args.render_train:
if args.shuffle_poses:
print('Shuffling train poses')
permutation = list(range(len(poses_train)))
random.shuffle(permutation)
poses_train = poses_train[permutation]
trainsavedir = os.path.join(basedir, expname, 'train_imgs{:06d}'.format(start))
os.makedirs(trainsavedir, exist_ok=True)
_, _, psnr = render_path(poses_train.to(device), style_train, hwfs_train, args.chunk, render_kwargs_test, nfs=nf_train, gt_imgs=images_train, savedir=trainsavedir)
print('Saved train set w/ psnr', psnr)
if __name__ == '__main__':
torch.set_default_tensor_type('torch.cuda.FloatTensor')
test()
```
#### File: editnerf/ui/editingapp.py
```python
import copy
import os
import torch
import numpy as np
import imageio
from torch.nn import functional as F
from torchvision import utils, transforms
from PIL import Image
from ui_utils import renormalize, show, labwidget, paintwidget, mean_colors
from rendering import render_path, render
from editing_utils import load_dataset, load_model, load_config, generate_flythrough
from dataset import NerfDataset
from run_nerf_helpers import img2mse, get_rays, to8b, to_disp_img
##########################################################################
# UI
##########################################################################
IMG_SIZE = 128
VERBOSE = True
N_ITERS = {'color': 100, 'removal': 100, 'addition': 10000}
LR = 0.001
N_RAYS = {'color': 64, 'removal': 8192}
class NeRFEditingApp(labwidget.Widget):
def __init__(self, instance, config, use_cached=True, expname=None, edit_type=None, num_canvases=9, shape_params='fusion_shape_branch', color_params='color_branch', randneg=8192, device='cuda:0'):
super().__init__(style=dict(border="3px solid gray", padding="8px", display="inline-block"))
torch.set_default_tensor_type('torch.cuda.FloatTensor' if device == 'cuda:0' else 'cpu')
self.edit_type = edit_type
self.instance = instance
self.num_canvases = num_canvases
self.shape_params = shape_params
self.color_params = color_params
self.size = IMG_SIZE
self.randneg = randneg
self.device = device
self.msg_out = labwidget.Div()
self.editing_canvas = paintwidget.PaintWidget(image='', width=self.size * 3, height=self.size * 3).on('mask', self.change_mask)
self.editing_canvas.index = -1
self.copy_canvas = paintwidget.PaintWidget(image='', width=self.size * 2, height=self.size * 2).on('mask', self.copy)
self.copy_mask = None
inline = dict(display='inline', border="2px solid gray")
self.toggle_rgbs_disps_btn = labwidget.Button('show depth', style=inline).on('click', self.toggle_rgb_disps)
self.positive_mask_btn = labwidget.Button(self.pad('edit color'), style=inline).on('click', self.positive_mask)
self.addition_mask_btn = labwidget.Button(self.pad('add shape'), style=inline).on('click', self.add)
self.sigma_mask_btn = labwidget.Button(self.pad('remove shape'), style=inline).on('click', self.sigma_mask)
self.color_from_btn = labwidget.Button(self.pad('transfer color'), style=inline).on('click', self.color_from)
self.shape_from_btn = labwidget.Button(self.pad('transfer shape'), style=inline).on('click', self.shape_from)
self.execute_btn = labwidget.Button(self.pad('execute'), style=inline).on('click', self.execute_edit)
self.brushsize_textbox = labwidget.Textbox(5, desc='brushsize: ', size=3).on('value', self.change_brushsize)
self.target = None
self.use_color_cache = True
self.color_style = dict(display='inline', border="2px solid white")
trn = transforms.Compose([transforms.Resize(32), transforms.ToTensor()])
bg_img = trn(Image.open('bg.png').convert('RGB'))
bg_img = renormalize.as_url(bg_img * 2 - 1)
self.color_pallete = [labwidget.Image(src=bg_img, style=self.color_style).on('click', self.set_color)]
self.color_pallete[-1].index = 0
self.color_pallete[-1].color_type = 'bg'
for color in mean_colors.colors.values():
image = torch.zeros(3, 32, 32)
image[0, :, :] = color[0]
image[1, :, :] = color[1]
image[2, :, :] = color[2]
image = image / 255. * 2 - 1
self.color_pallete.append(labwidget.Image(src=renormalize.as_url(image), style=self.color_style).on('click', self.set_color))
self.color_pallete[-1].index = len(self.color_pallete) - 1
self.color_pallete[-1].color_type = 'color'
# TODO: Highlight the white box with black for clarity
self.color = None
self.mask_type = None
self.real_canvas_array = []
self.real_images_array = []
self.positive_masks = []
train, test, optimizer, styles = load_model(instance, config, expname=expname)
poses, hwfs, cache, args = load_dataset(instance, config, num_canvases=num_canvases, N_instances=styles.shape[0], expname=expname, use_cached=use_cached)
self.parentdir = load_config(config).expname
self.expname = expname if expname else self.parentdir
self.savedir = os.path.join(self.expname, str(instance))
os.makedirs(self.savedir, exist_ok=True)
self.poses = poses.to(device)
self.cache = cache
self.chunk = args.chunk
self.near = args.blender_near
self.far = args.blender_far
self.nfs = [[self.near, self.far]] * self.poses.shape[0]
self.hwfs = hwfs.to(device)
self.old_fine_network = dict(copy.deepcopy(test['network_fine']).named_parameters())
self.train_kwargs = train
self.test_kwargs = test
self.optimizer = None
self.all_instance_styles = styles
self.instance_style = styles[instance].unsqueeze(dim=0).to(device)
if cache is not None:
self.weights = cache['weights']
self.alphas = cache['alphas']
self.features = cache['features']
else:
self.weights = None
self.alphas = None
self.features = None
self.trn = transforms.Compose([transforms.Resize(128), transforms.ToTensor()])
self.transfer_instances_array = [labwidget.Image(src='').on('click', self.change_target) for _ in range(12)]
self.addition_instances_array = [labwidget.Image(src='').on('click', self.change_target) for _ in range(12)]
images, disps = self.render(self.poses, self.instance_style, verbose=False, get_disps=True)
for i, image in enumerate(images):
resized = F.interpolate(image.unsqueeze(dim=0), size=(self.size, self.size)).squeeze(dim=0)
disp_img = torch.from_numpy(to8b(to_disp_img(disps[i]))).unsqueeze(dim=0) / 255.
resized_disp = F.interpolate(disp_img.unsqueeze(dim=0), size=(self.size, self.size)).squeeze(dim=0)
self.real_images_array.append(labwidget.Image(
src=renormalize.as_url(resized)).on('click', self.set_editing_canvas))
self.real_images_array[-1].index = i
self.real_canvas_array.append(paintwidget.PaintWidget(
image=renormalize.as_url(image),
width=self.size * 3, height=self.size * 3).on('mask', self.change_mask))
self.real_canvas_array[-1].index = i
self.real_canvas_array[-1].negative_mask = ''
self.real_canvas_array[-1].resized_image = renormalize.as_url(resized)
self.real_canvas_array[-1].resized_disp = renormalize.as_url(resized_disp)
self.real_canvas_array[-1].disp = renormalize.as_url(disp_img)
self.real_canvas_array[-1].orig = renormalize.as_url(image)
self.positive_masks.append(torch.zeros(image.shape).cpu())
self.show_rgbs = True
self.change_brushsize()
self.editname_textbox = labwidget.Datalist(choices=self.saved_names(), style=inline)
self.save_btn = labwidget.Button('save', style=inline).on('click', self.save)
self.load_btn = labwidget.Button('load', style=inline).on('click', self.load)
def pad(self, s, total=14):
white = ' ' * ((total - len(s)) // 2)
return white + s + white
def make_transparent(self):
for button in [self.sigma_mask_btn, self.positive_mask_btn, self.addition_mask_btn, self.color_from_btn, self.shape_from_btn]:
button.style = {'display': 'inline', 'color': 'grey', 'border': "1px solid grey"}
def negative_mask(self):
self.mask_type = 'negative'
if self.editing_canvas.image != '':
self.editing_canvas.mask = self.real_canvas_array[self.editing_canvas.index].negative_mask
def positive_mask(self):
self.mask_type = 'positive'
self.make_transparent()
self.positive_mask_btn.style = {'display': 'inline', 'color': 'black', 'border': "2px solid black"}
self.editing_canvas.mask = ''
def sigma_mask(self):
self.mask_type = 'sigma'
self.make_transparent()
self.sigma_mask_btn.style = {'display': 'inline', 'color': 'black', 'border': "2px solid black"}
self.editing_canvas.mask = ''
def from_editing_canvas(self):
self.real_canvas_array[self.editing_canvas.index].image = self.editing_canvas.image
def update_canvas(self, images, disps=None):
for i, image in enumerate(images):
resized_rgb = F.interpolate(image.unsqueeze(dim=0), size=(self.size, self.size)).squeeze(dim=0)
self.real_images_array[i].src = renormalize.as_url(resized_rgb)
self.real_canvas_array[i].image = renormalize.as_url(image)
self.real_canvas_array[i].resized_image = renormalize.as_url(resized_rgb)
if disps is not None:
disp_img = torch.from_numpy(to8b(to_disp_img(disps[i]))).unsqueeze(dim=0) / 255.
resized_disp = F.interpolate(disp_img.unsqueeze(dim=0), size=(self.size, self.size)).squeeze(dim=0)
self.real_canvas_array[i].resized_disp = renormalize.as_url(resized_disp)
self.real_canvas_array[i].disp = renormalize.as_url(disp_img)
if self.editing_canvas.index >= 0:
self.editing_canvas.image = self.real_canvas_array[self.editing_canvas.index].image
def toggle_rgb_disps(self):
self.show_rgbs = not self.show_rgbs
for i in range(len(self.real_canvas_array)):
if self.show_rgbs:
self.real_images_array[i].src = self.real_canvas_array[i].resized_image
else:
self.real_images_array[i].src = self.real_canvas_array[i].resized_disp
if self.show_rgbs:
self.toggle_rgbs_disps_btn.label = 'show depth'
else:
self.toggle_rgbs_disps_btn.label = 'show rgbs'
def set_color(self, evt):
for i in range(len(self.color_pallete)):
self.color_pallete[i].style = {'display': 'inline', 'border': "2px solid white"}
evt.target.style = {'display': 'inline', 'border': "1px solid black"}
if evt.target.color_type == 'bg':
self.negative_mask()
else:
image = renormalize.from_url(evt.target.src) / 2 + 0.5
image = image * 255
self.color = [int(x) * 2 / 255. - 1 for x in image[:, 0, 0]]
color = torch.zeros((3, self.size * 2, self.size * 2)).cpu()
color[0, :, :] = self.color[0]
color[1, :, :] = self.color[1]
color[2, :, :] = self.color[2]
self.color = color
def change_brushsize(self):
brushsize = int(self.brushsize_textbox.value)
for c in self.real_canvas_array:
c.brushsize = brushsize
self.editing_canvas.brushsize = brushsize
self.copy_canvas.brushsize = brushsize
def set_editing_canvas(self, evt):
self.editing_canvas.image = self.real_canvas_array[evt.target.index].image
self.editing_canvas.index = self.real_canvas_array[evt.target.index].index
if self.mask_type == 'negative':
self.editing_canvas.mask = self.real_canvas_array[evt.target.index].negative_mask
else:
self.editing_canvas.mask = ''
def add(self, ev):
self.edit_type = 'addition'
self.make_transparent()
self.addition_mask_btn.style = {'display': 'inline', 'color': 'black', 'border': "2px solid black"}
self.display_addition_instance()
def color_from(self, ev):
self.edit_type = 'color_from'
self.make_transparent()
self.color_from_btn.style = {'display': 'inline', 'color': 'black', 'border': "2px solid black"}
self.display_transfer_instance()
def shape_from(self, ev):
self.edit_type = 'shape_from'
self.make_transparent()
self.shape_from_btn.style = {'display': 'inline', 'color': 'black', 'border': "2px solid black"}
self.display_transfer_instance()
def display_transfer_instance(self):
for i in range(12):
self.transfer_instances_array[i].src = renormalize.as_url(self.trn(Image.open(os.path.join(self.parentdir, 'instances', '{:03d}.png'.format(i)))) * 2 - 1)
self.transfer_instances_array[i].index = i
def display_addition_instance(self):
for i in range(12):
self.addition_instances_array[i].src = renormalize.as_url(self.trn(Image.open(os.path.join(self.parentdir, 'instances', '{:03d}.png'.format(i)))) * 2 - 1)
self.addition_instances_array[i].index = i
def render(self, poses, style, verbose=True, get_disps=False, update=False, update_cache=True, inds=None, use_cache=True):
def cb(i):
if verbose and VERBOSE:
self.msg_out.print(f'Rendering view: {i+1}/{len(poses)}', replace=True)
def update_cb(i, rgb):
if update:
img = torch.tensor(rgb).permute(2, 0, 1) * 2 - 1
resized = F.interpolate(img.unsqueeze(dim=0), size=(self.size, self.size)).squeeze(dim=0)
self.real_images_array[i].src = renormalize.as_url(resized)
else:
pass
with torch.no_grad():
styles = style.repeat((poses.shape[0], 1))
if self.use_color_cache and use_cache and self.alphas and self.features and self.weights:
if inds:
alpha_cache = [self.alphas[i] for i in inds]
feature_cache = [self.features[i] for i in inds]
weights_cache = [self.weights[i] for i in inds]
else:
alpha_cache = self.alphas
feature_cache = self.features
weights_cache = self.weights
images, disps, _, = render_path(poses, styles, self.hwfs, self.chunk, self.test_kwargs, nfs=self.nfs, alpha_cache=alpha_cache, feature_cache=feature_cache, weights_cache=weights_cache, verbose=False, cb=cb, update_cb=update_cb)
else:
images, disps, _, alphas, features, weights = render_path(poses, styles, self.hwfs, self.chunk, self.test_kwargs, nfs=self.nfs, verbose=False, cb=cb, update_cb=update_cb, get_cached='color')
if update_cache:
self.alphas = alphas
self.features = features
self.weights = weights
images = torch.tensor(images).permute(0, 3, 1, 2) * 2 - 1
if get_disps:
return images, disps
else:
return images
def target_transfer(self, instancenum, index):
self.copy_canvas.mask = ''
self.copy_canvas.index = index
self.copy_canvas.instance_style = self.all_instance_styles[instancenum].unsqueeze(dim=0)
rgb = self.render(self.poses[index].unsqueeze(dim=0), self.copy_canvas.instance_style.squeeze(dim=0), verbose=False, use_cache=False)
self.copy_canvas.image = renormalize.as_url(F.interpolate(rgb, size=(self.size, self.size))[0])
def change_mask(self, ev):
if self.mask_type == 'positive' or self.mask_type == 'sigma':
i = self.editing_canvas.index
orig_img = renormalize.from_url(self.editing_canvas.image)
mask = renormalize.from_url(self.editing_canvas.mask) / 2 + 0.5
mask = F.interpolate(mask.unsqueeze(dim=0), size=(self.size * 2, self.size * 2)).squeeze()
if self.mask_type == 'positive':
self.edit_type = 'color'
if self.color is None:
self.show_msg('Please select a color.')
if ev.target.image != '':
self.real_canvas_array[ev.target.index].negative_mask = ''
return
edited_img = orig_img * (1 - mask) + mask * self.color
elif self.mask_type == 'sigma':
self.edit_type = 'removal'
edited_img = orig_img * (1 - mask) + mask * torch.ones((3, self.size * 2, self.size * 2)).to(mask.device)
self.positive_masks[i] += mask
self.real_canvas_array[i].image = renormalize.as_url(edited_img)
self.editing_canvas.image = renormalize.as_url(edited_img)
self.real_images_array[i].src = renormalize.as_url(F.interpolate(edited_img.unsqueeze(dim=0), size=(self.size, self.size)).squeeze())
self.editing_canvas.mask = ''
elif self.mask_type == 'negative':
i = ev.target.index
self.real_canvas_array[i].negative_mask = self.editing_canvas.mask
elif self.copy_mask is not None:
self.paste()
else:
if ev.target.image != '':
self.real_canvas_array[ev.target.index].negative_mask = ''
def render_editing_canvas(self, style):
index = self.editing_canvas.index
pose = self.poses[index].unsqueeze(dim=0)
self.editing_canvas.image = renormalize.as_url(self.render(pose, style, verbose=False, inds=[index], use_cache=self.edit_type == 'color_from', update_cache=False)[0])
def change_target(self, ev):
self.target = ev.target.index
if self.edit_type == 'color_from':
target_style = self.all_instance_styles[self.target].unsqueeze(dim=0).cuda()
new_style = torch.cat([self.instance_style[:, :32], target_style[:, 32:]], dim=1)
self.render_editing_canvas(new_style)
elif self.edit_type == 'shape_from':
target_style = self.all_instance_styles[self.target].unsqueeze(dim=0).cuda()
new_style = torch.cat([target_style[:, :32], self.instance_style[:, 32:]], dim=1)
self.render_editing_canvas(new_style)
elif self.edit_type == 'addition':
if self.editing_canvas.image != '':
self.target_transfer(self.target, self.editing_canvas.index)
def copy(self, ev):
self.copy_mask = self.copy_canvas.mask
tgt_style = self.copy_canvas.instance_style
index = self.copy_canvas.index
area = renormalize.from_url(self.copy_mask, target='pt', size=(256, 256))[0]
t, l, b, r = positive_bounding_box(area)
H, W, focal = self.hwfs[0]
H, W = H.item(), W.item()
with torch.no_grad():
rays_o, rays_d = get_rays(int(H), int(W), focal, self.poses[index])
rays_o, rays_d = rays_o[t:b, l:r], rays_d[t:b, l:r]
rays_o, rays_d = rays_o.contiguous().view(-1, rays_o.shape[-1]), rays_d.contiguous().view(-1, rays_d.shape[-1])
batch_rays = torch.stack([rays_o, rays_d], 0)
# render the rays under the editing canvas color style
style = torch.cat([tgt_style[:, :32], self.instance_style[:, 32:]], dim=1)
style = style.repeat((batch_rays.shape[1], 1))
rgb, disp, acc, extras = render(H, W, focal.item(), style=style, rays=batch_rays, **self.test_kwargs)
self.copy_canvas.rgb = rgb.view(b - t, r - l, -1).cpu() * 2 - 1
self.copy_canvas.mask = ''
def paste(self):
if self.copy_mask is None:
self.show_msg('Please select a region to copy first.')
return
copy_to = renormalize.from_url(self.editing_canvas.mask, target='pt', size=(256, 256))[0]
area = renormalize.from_url(self.copy_mask, target='pt', size=(256, 256))[0]
t, l, b, r = positive_bounding_box(area)
area = area[t:b, l:r]
target_rgb = self.copy_canvas.rgb
source_rgb = renormalize.from_url(self.editing_canvas.image).permute(1, 2, 0)
rendered_img = paste_clip_at_center(source_rgb, target_rgb, centered_location(copy_to), area)[0].permute(2, 0, 1)
self.editing_canvas.mask = ''
self.editing_canvas.image = renormalize.as_url(rendered_img)
self.positive_masks[self.editing_canvas.index] += copy_to
self.real_images_array[self.editing_canvas.index].src = renormalize.as_url(F.interpolate(rendered_img.unsqueeze(dim=0), size=(self.size, self.size)).squeeze())
self.from_editing_canvas()
def execute_edit(self):
if self.edit_type == 'color':
self.toggle_grad()
self.toggle_color_edit()
self.create_color_dataset()
self.optimize()
if self.edit_type == 'removal':
self.use_color_cache = False
self.toggle_grad()
self.toggle_shape_edit()
self.create_remove_dataset()
self.get_cache()
self.optimize()
if self.edit_type == 'addition':
self.use_color_cache = False
self.toggle_grad()
self.toggle_shape_edit()
self.create_addition_dataset()
self.optimize()
if self.edit_type == 'color_from':
target_style = self.all_instance_styles[self.target].unsqueeze(dim=0).cuda()
self.instance_style = torch.cat([self.instance_style[:, :32], target_style[:, 32:]], dim=1)
if self.edit_type == 'shape_from':
self.use_color_cache = False
target_style = self.all_instance_styles[self.target].unsqueeze(dim=0).cuda()
self.instance_style = torch.cat([target_style[:, :32], self.instance_style[:, 32:]], dim=1)
rgbs, disps = self.render(self.poses, self.instance_style, get_disps=True, update=True)
self.use_color_cache = True
self.update_canvas(rgbs, disps)
def get_image_dataset(self):
images = []
poses = []
positive_masks = []
negative_masks = []
for i in range(self.num_canvases):
# TODO: speed the .sum() up by having an edited field
if self.real_canvas_array[i].negative_mask != '' or self.positive_masks[i].sum() != 0:
image = renormalize.from_url(self.real_canvas_array[i].image) / 2 + 0.5
if self.real_canvas_array[i].negative_mask != '':
negative_mask = renormalize.from_url(self.real_canvas_array[i].negative_mask) / 2 + 0.5
negative_mask = F.interpolate(negative_mask.unsqueeze(dim=0), size=(self.size * 2, self.size * 2)).squeeze()
negative_masks.append((negative_mask > 0).float().clamp_(0, 1))
else:
negative_masks.append(torch.zeros(self.positive_masks[i].shape).cpu())
if self.positive_masks[i].sum() != 0:
positive_masks.append(self.positive_masks[i].clamp_(0, 1))
else:
positive_masks.append(torch.zeros(negative_masks[-1].shape).cpu())
images.append(image)
poses.append(self.poses[i])
images = torch.stack(images).permute(0, 2, 3, 1)
positive_masks = torch.stack(positive_masks).permute(0, 2, 3, 1)
negative_masks = torch.stack(negative_masks).permute(0, 2, 3, 1)
poses = torch.stack(poses)
return images, positive_masks, negative_masks, poses
def create_color_dataset(self):
if self.color_params == 'color_branch':
self.optimizer = torch.optim.Adam(params=list(self.train_kwargs['network_fine'].color_branch()), lr=LR, betas=(0.9, 0.999))
elif self.color_params == 'color_code':
self.optimizer = None
elif self.color_params == 'whole_network':
self.optimizer = torch.optim.Adam(params=list(self.train_kwargs['network_fine'].parameters()), lr=LR, betas=(0.9, 0.999))
images, positive_masks, negative_masks, poses = self.get_image_dataset()
self.dataset = NerfDataset(images, poses, positive_masks, negative_masks, self.instance_style, self.hwfs, self.device, self.edit_type, N_rays=N_RAYS[self.edit_type], optimize_code=True, lr=LR)
def create_remove_dataset(self):
if self.shape_params == 'fusion_shape_branch':
self.optimizer = torch.optim.Adam(params=list(self.train_kwargs['network_fine'].fusion_shape_branch()), lr=LR, betas=(0.9, 0.999))
optimize_code = False
elif self.shape_params == 'shape_branch':
self.optimizer = torch.optim.Adam(params=list(self.train_kwargs['network_fine'].shape_branch()), lr=LR, betas=(0.9, 0.999))
optimize_code = False
elif self.shape_params == 'shape_code':
self.optimizer = None
optimize_code = True
elif self.shape_params == 'whole_network':
self.optimizer = torch.optim.Adam(params=list(self.train_kwargs['network_fine'].parameters()), lr=LR, betas=(0.9, 0.999))
optimize_code = True
images, positive_masks, negative_masks, poses = self.get_image_dataset()
self.dataset = NerfDataset(images, poses, positive_masks, negative_masks, self.instance_style, self.hwfs, self.device, self.edit_type, randneg=self.randneg, lr=LR, N_rays=N_RAYS[self.edit_type], optimize_code=optimize_code, use_cached=True)
def create_addition_dataset(self):
params = list(self.train_kwargs['network_fine'].fusion_shape_branch()) + list(self.train_kwargs['network_fn'].fusion_shape_branch())
self.optimizer = torch.optim.Adam(params=params, lr=LR, betas=(0.9, 0.999))
images, positive_masks, negative_masks, poses = self.get_image_dataset()
self.dataset = NerfDataset(images, poses, positive_masks, negative_masks, self.instance_style, self.hwfs, self.device, self.edit_type, randneg=self.randneg, lr=LR)
def get_cache(self):
if self.shape_params == 'fusion_shape_branch' and self.color_params == 'color_branch':
with torch.no_grad():
self.train_kwargs['network_fine'].get_cached = 'shape' if self.edit_type in ('addition', 'removal') else 'color'
self.train_kwargs.update({'near': self.near, 'far': self.far})
H, W, f = self.hwfs[0]
features, weights = [], []
for i in range(len(self.dataset)):
batch_rays, _, style, _, _, _, _ = self.dataset.get_data_batch(all_rays=True, imgnum=i)
rgb, disp, acc, extras = render(H, W, f, style=style, rays=batch_rays, **self.train_kwargs)
features.append(extras['features'])
weights.append(extras['weights0'])
if self.edit_type in ('addition', 'removal'):
self.dataset.shape_features = features
else:
self.dataset.color_features = features
self.dataset.weights = weights
self.train_kwargs['network_fine'].get_cached = None
def optimize(self):
niter = N_ITERS[self.edit_type]
H, W, f = self.hwfs[0]
if self.optimizer is not None:
for param_group in self.optimizer.param_groups:
param_group['lr'] = LR
for i in range(niter):
batch_rays, target_s, style, mask_rays, shape_features, color_features, weights = self.dataset.get_data_batch()
if shape_features is not None:
features = shape_features
elif color_features is not None:
features = color_features
else:
features = None
self.train_kwargs.update({'near': self.near, 'far': self.far})
if self.optimizer is not None:
self.optimizer.zero_grad()
rgb, disp, acc, extras = render(H, W, f, style=style, rays=batch_rays, feature=features, weights=weights, **self.train_kwargs)
loss = img2mse(rgb, target_s)
if self.edit_type == 'addition':
loss += img2mse(extras['rgb0'], target_s)
weight_change_loss = torch.tensor(0.)
for k, v in self.train_kwargs['network_fine'].named_parameters():
if 'weight' in k:
weight_change_loss += (self.old_fine_network[k] - v).pow(2).mean()
weight_change_loss = 10 * weight_change_loss
loss += weight_change_loss
if self.edit_type == 'removal':
sigma_loss = 0.01 * (mask_rays * (-extras['weights'] * (extras['weights'] + 1e-7).log()).sum(dim=1)).mean()
loss += sigma_loss
else:
sigma_loss = torch.tensor(0.)
loss.backward()
if self.optimizer is not None:
self.optimizer.step()
if VERBOSE:
self.msg_out.print(f'Iter {i+1}/{niter}, Loss: {loss.item():.4f}', replace=True)
else:
self.msg_out.print(f'Iter {i+1}/{niter}', replace=True)
def toggle_grad(self):
for n, p in self.train_kwargs['network_fn'].named_parameters():
p.requires_grad_(True)
for n, p in self.train_kwargs['network_fine'].named_parameters():
p.requires_grad_(True)
def toggle_color_edit(self):
self.train_kwargs['perturb'] = 0
self.train_kwargs['perturb_coarse'] = 0
for n, p in self.train_kwargs['network_fn'].named_parameters():
p.requires_grad_(False)
for n, p in self.train_kwargs['network_fine'].named_parameters():
if not any([s in n for s in ['style_linears.2', 'rgb_linear.', 'views_linears.']]):
p.requires_grad_(False)
def toggle_shape_edit(self):
if self.edit_type == 'addition':
self.train_kwargs['perturb'] = 1
self.train_kwargs['perturb_coarse'] = 1
else:
self.train_kwargs['perturb'] = 0
self.train_kwargs['perturb_coarse'] = 0
def save(self):
if self.editname_textbox.value == '':
self.show_msg('Please enter a name to save your file')
return
savedir = os.path.join(self.savedir, self.editname_textbox.value)
# clear the savedir if conflicting
if os.path.exists(savedir):
for x in os.listdir(savedir):
os.remove(os.path.join(savedir, x))
os.makedirs(savedir, exist_ok=True)
for i in range(self.num_canvases):
if self.real_canvas_array[i].negative_mask != '':
torch.save(self.real_canvas_array[i].negative_mask, os.path.join(savedir, f'{i}_neg.pt'))
if self.positive_masks[i].sum() != 0:
image = renormalize.from_url(self.real_canvas_array[i].image) / 2 + 0.5
utils.save_image(image, os.path.join(savedir, f'{i}_rgb.png'))
torch.save(self.positive_masks[i].clamp_(0, 1), os.path.join(savedir, f'{i}_pos.pt'))
with open(os.path.join(savedir, 'edit_type.txt'), 'w') as f:
f.write(f'{self.edit_type}')
self.show_msg('Done saving')
def load(self):
if self.editname_textbox.value == '':
self.show_msg('Please enter a file name to load')
return
savedir = os.path.join(self.savedir, self.editname_textbox.value)
if not os.path.exists(savedir):
self.show_msg(f'{savedir} does not exist')
return
with open(os.path.join(savedir, 'edit_type.txt')) as f:
self.edit_type = f.readlines()[0].strip()
trn = transforms.ToTensor()
for i in range(self.num_canvases):
if os.path.exists(os.path.join(savedir, f'{i}_rgb.png')):
image = trn(Image.open(os.path.join(savedir, f'{i}_rgb.png'))) * 2 - 1
self.real_canvas_array[i].image = renormalize.as_url(image)
self.real_canvas_array[i].resized_image = renormalize.as_url(F.interpolate(image.unsqueeze(dim=0), size=(self.size, self.size)).squeeze())
self.real_images_array[i].src = self.real_canvas_array[i].resized_image
if os.path.exists(os.path.join(savedir, f'{i}_pos.pt')):
self.positive_masks[i] = torch.load(os.path.join(savedir, f'{i}_pos.pt'))
if os.path.exists(os.path.join(savedir, f'{i}_neg.pt')):
self.real_canvas_array[i].negative_mask = torch.load(os.path.join(savedir, f'{i}_neg.pt'))
def saved_names(self):
return [x for x in os.listdir(self.savedir) if os.path.exists(os.path.join(self.savedir, x, 'edit_type.txt'))]
def show_msg(self, msg):
self.msg_out.clear()
self.msg_out.print(msg, replace=False)
def widget_html(self):
def h(w):
return w._repr_html_()
html = f'''<div {self.std_attrs()}>
<div style="display:inline-block; width:{1.00 * self.size + 2}px;
text-align:center">
{h(self.toggle_rgbs_disps_btn)}
</div>
<div style="display:inline-block; width:{5.00 * self.size + 2}px;
text-align:right">
{h(self.editname_textbox)}
{h(self.save_btn)}
{h(self.load_btn)}
</div>
<div style="margin-top: 8px; margin-bottom: 8px;">
<div style="display:inline-block; width:{1.00 * self.size + 2}px;
text-align:center">
{h(self.positive_mask_btn)}
</div>
<div style="display:inline-block; width:{1.00 * self.size + 2}px;
text-align:center">
{h(self.sigma_mask_btn)}
</div>
<div style="display:inline-block; width:{1.00 * self.size + 2}px;
text-align:center">
{h(self.addition_mask_btn)}
</div>
<div style="display:inline-block; width:{1.00 * self.size + 2}px;
text-align:center">
{h(self.color_from_btn)}
</div>
<div style="display:inline-block; width:{1.00 * self.size + 2}px;
text-align:center">
{h(self.shape_from_btn)}
</div>
<div style="display:inline-block; width:{1.00 * self.size + 2}px;
text-align:center">
{h(self.execute_btn)}
</div>
</div>
<div>
<div style="display:inline-block;
width:{(self.size + 2) * 4}px;
height:{40}px;
vertical-align:top;
overflow-y: scroll;
text-align:center">
{show.html([[x] for x in self.color_pallete])}
</div>
<div style="display:inline-block;
width:{(self.size + 2) * 2 + 12}px;
height:{40}px;
vertical-align:top;
overflow-y: scroll;
text-align:center">
{h(self.msg_out)}
</div>
<div>
<div style="width:{(self.size + 2) * 6 + 20}px;">
<hr style="border:2px dashed gray; background-color: white">
</div>
<div>
{h(self.editing_canvas)}
<div style="display:inline-block;
width:{(self.size + 2) * 3 + 20}px;
height:{(self.size + 2) * 3 + 20}px;
vertical-align:top;
overflow-y: scroll;
text-align:center">
{show.html([[c] for c in self.real_images_array])}
</div>
</div>
<div style="width:{(self.size + 2) * 6 + 20}px;">
<hr style="border:2px dashed gray; background-color: white">
</div>
<div>
<div style="display:inline-block;
width:{(self.size + 2) * 6 + 20}px;
height:{140}px;
vertical-align:top;
overflow-y: scroll;
text-align:center">
{show.html([[c] for c in self.transfer_instances_array])}
</div>
</div>
<div style="width:{(self.size + 2) * 6 + 20}px;">
<hr style="border:2px dashed gray; background-color: white">
</div>
<div>
{h(self.copy_canvas)}
<div style="display:inline-block;
width:{(self.size + 2) * 4 + 20}px;
height:{(self.size * 2)}px;
vertical-align:top;
overflow-y: scroll;
text-align:center">
{show.html([[c] for c in self.addition_instances_array])}
</div>
</div>
</div>
'''
return html
##########################################################################
# Utility functions
##########################################################################
def positive_bounding_box(data):
pos = (data > 0)
v, h = pos.sum(0).nonzero(), pos.sum(1).nonzero()
left, right = v.min().item(), v.max().item()
top, bottom = h.min().item(), h.max().item()
return top, left, bottom + 1, right + 1
def centered_location(data):
t, l, b, r = positive_bounding_box(data)
return (t + b) // 2, (l + r) // 2
def paste_clip_at_center(source, clip, center, area=None):
source = source.unsqueeze(dim=0).permute(0, 3, 1, 2)
clip = clip.unsqueeze(dim=0).permute(0, 3, 1, 2)
target = source.clone()
t, l = (max(0, min(e - s, c - s // 2))
for s, c, e in zip(clip.shape[2:], center, source.shape[2:]))
b, r = t + clip.shape[2], l + clip.shape[3]
# TODO: consider copying over a subset of channels.
target[:, :, t:b, l:r] = clip if area is None else (
(1 - area)[None, None, :, :].to(target.device) *
target[:, :, t:b, l:r] +
area[None, None, :, :].to(target.device) * clip)
target = target.squeeze().permute(1, 2, 0)
return target, (t, l, b, r)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--instance', type=int)
parser.add_argument('--randneg', type=int, default=8192)
parser.add_argument('--config')
parser.add_argument('--expname')
parser.add_argument('--editname')
parser.add_argument('--second_editname')
parser.add_argument('--shape_params', default='fusion_shape_branch')
parser.add_argument('--color_params', default='color_branch')
parser.add_argument('--video', action='store_true')
args = parser.parse_args()
writer = NeRFEditingApp(instance=args.instance, expname=args.expname, config=args.config, shape_params=args.shape_params, color_params=args.color_params, randneg=args.randneg, num_canvases=9, use_cached=False)
expname = writer.expname
editnames = [args.editname]
if args.second_editname:
editnames.append(args.second_editname)
for editname in editnames:
if editname:
savedir = os.path.join(expname, editname)
if args.shape_params != 'fusion_shape_branch':
savedir = os.path.join(savedir, args.shape_params)
if args.color_params != 'color_branch':
savedir = os.path.join(savedir, args.color_params)
if args.randneg != 8192:
savedir += f'_{args.randneg}'
os.makedirs(savedir, exist_ok=True)
print('Working in', savedir)
# load and execute the edit
writer.editname_textbox.value = editname
writer.load()
writer.execute_edit()
else:
savedir = os.path.join(expname, 'flythroughs', str(args.instance))
os.makedirs(savedir, exist_ok=True)
all_poses = torch.tensor(np.load(os.path.join(expname, 'poses.npy')))
all_hwfs = torch.tensor(np.load(os.path.join(expname, 'hwfs.npy')))
if args.expname:
N_per_instance = 1
else:
N_per_instance = all_poses.shape[0] // writer.all_instance_styles.shape[0]
ps, pe = args.instance * N_per_instance, (args.instance + 1) * N_per_instance
all_poses = all_poses[ps:pe]
if args.video:
all_poses, all_hwfs = generate_flythrough(all_poses[0].cpu(), all_hwfs[0], num_poses=100)
nfs = [[writer.near, writer.far]] * all_poses.shape[0]
styles = writer.instance_style.repeat((all_poses.shape[0], 1))
with torch.no_grad():
print(f'Saving samples in {savedir}')
rgbs, disps, psnr = render_path(all_poses, styles, all_hwfs, writer.chunk, writer.test_kwargs, nfs=nfs, savedir=savedir, verbose=True)
if args.video:
imageio.mimwrite(os.path.join(savedir, 'video.mp4'), to8b(rgbs), fps=30, quality=8)
imageio.mimwrite(os.path.join(savedir, 'disps.mp4'), to8b(disps / np.max(disps)), fps=30, quality=8)
```
#### File: editnerf/ui/run_nerf_helpers.py
```python
from torchsearchsorted import searchsorted
import numpy as np
import torch
torch.autograd.set_detect_anomaly(True)
TEST = False
# Misc
def img2mse(x, y): return torch.mean((x - y) ** 2)
def img2l1(x, y): return torch.mean((x - y).abs())
def mse2psnr(x): return -10. * torch.log(x) / torch.log(torch.Tensor([10.]))
def to8b(x): return (255 * np.clip(x, 0, 1)).astype(np.uint8)
def to_disp_img(disp):
# clip outliers
#disp = 1. / disp
min_disp, max_disp = np.percentile(disp, [5, 95])
disp[disp < min_disp] = min_disp
disp[disp > max_disp] = max_disp
# disp = disp - disp.min() #normalize to have [0, max]
disp = disp / disp.max() # normalize in [0, 1]
return disp
# Ray helpers
def get_rays(H, W, focal, c2w):
i, j = torch.meshgrid(torch.linspace(0, W - 1, W), torch.linspace(0, H - 1, H)) # pytorch's meshgrid has indexing='ij'
i = i.t()
j = j.t()
wfactor, hfactor = focal.item(), focal.item()
if focal < 10: # super hacky
# normalize to [-1, 1]
wfactor *= (W * .5)
hfactor *= (H * .5)
# inside [-200, 200] (400/2), we only want to render from [-128/200, 128/200]
wfactor *= (200. / 128.)
hfactor *= (200. / 128.)
dirs = torch.stack([(i - W * .5) / wfactor, -(j - H * .5) / hfactor, -torch.ones_like(i)], -1)
# Rotate ray directions from camera frame to the world frame
rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1) # dot product, equals to: [c2w.dot(dir) for dir in dirs]
# Translate camera frame's origin to the world frame. It is the origin of all rays.
rays_o = c2w[:3, -1].expand(rays_d.shape)
return rays_o, rays_d
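# Usage sketch (shapes only): for an H x W image and a 4x4 camera-to-world
# matrix c2w, with focal given as a 0-dim tensor (item() is called above),
#   rays_o, rays_d = get_rays(H, W, focal, c2w)
# returns world-frame ray origins and directions, each of shape (H, W, 3).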
# Hierarchical sampling (section 5.2)
def sample_pdf(bins, weights, N_samples, det=False, pytest=False):
# Get pdf
weights = weights + 1e-5 # prevent nans
pdf = weights / torch.sum(weights, -1, keepdim=True)
cdf = torch.cumsum(pdf, -1)
cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) # (batch, len(bins))
# Take uniform samples
if det:
u = torch.linspace(0., 1., steps=N_samples)
u = u.expand(list(cdf.shape[:-1]) + [N_samples])
else:
u = torch.rand(list(cdf.shape[:-1]) + [N_samples])
# Pytest, overwrite u with numpy's fixed random numbers
if pytest:
np.random.seed(0)
new_shape = list(cdf.shape[:-1]) + [N_samples]
if det:
u = np.linspace(0., 1., N_samples)
u = np.broadcast_to(u, new_shape)
else:
u = np.random.rand(*new_shape)
u = torch.Tensor(u)
# Invert CDF
u = u.contiguous()
inds = searchsorted(cdf, u, side='right')
below = torch.max(torch.zeros_like(inds - 1), inds - 1)
above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds)
inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2)
matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]]
cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
denom = (cdf_g[..., 1] - cdf_g[..., 0])
denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom)
t = (u - cdf_g[..., 0]) / denom
samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
return samples
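# Usage sketch (hypothetical tensors): with bins of shape (N_rays, N_bins) and
# weights of shape (N_rays, N_bins - 1),
#   samples = sample_pdf(bins, weights, N_samples=64, det=True)
# draws 64 positions per ray from the piecewise-constant PDF defined by the
# weights and returns a tensor of shape (N_rays, 64).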
```
#### File: editnerf/utils/evaluate_reconstruction.py
```python
from metrics import PSNR, SSIM, LPIPS
from torchvision import transforms
from PIL import Image
import numpy as np
import torch
import os
import argparse
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
parser = argparse.ArgumentParser('Used for evaluating edit quality')
parser.add_argument('--expdir', help='where the generated samples are')
args = parser.parse_args()
expdir = [os.path.join(args.expdir, f) for f in sorted(os.listdir(args.expdir)) if 'test_imgs' in f][-1]
print(expdir)
trn = transforms.ToTensor()
psnr = PSNR(float)
ssim = SSIM(float)
lpips = LPIPS(float)
def load_generated_images(path, N=100):
images = []
for i in range(N):
f = os.path.join(path, '{:04d}_rgb.png'.format(i))
if not os.path.exists(f):
return
images.append(trn(Image.open(f)))
return torch.stack(images)
def load_real_images(path, N=100):
images = []
for i in range(N):
f = os.path.join(path, '{:04d}_gt.png'.format(i))
if not os.path.exists(f):
return
images.append(trn(Image.open(f)))
return torch.stack(images)
def get_metrics(fake, real):
np_fake, np_real = fake.permute(0, 2, 3, 1).numpy().astype(np.float64), real.permute(0, 2, 3, 1).numpy().astype(np.float64)
psnr_total = 0
ssim_total = 0
total = 0
for x, y in zip(np_fake, np_real):
psnr_total += psnr(x, y)
ssim_total += ssim(x, y)
total += 1
return psnr_total / total, ssim_total / total, lpips(fake * 2 - 1, real * 2 - 1).mean().item()
generated = load_generated_images(expdir)
real = load_real_images(expdir)
psnr_num, ssim_num, lpips_num = get_metrics(generated, real)
msg = f'PSNR: {psnr_num} SSIM: {ssim_num} LPIPS: {lpips_num}'
with open(os.path.join(expdir, 'numbers.txt'), 'w') as f:
f.write(msg)
print(msg)
```
#### File: editnerf/utils/pidfile.py
```python
import os
import errno
import socket
import atexit
import time
import sys
def exit_if_job_done(directory, redo=False, force=False, verbose=True):
if pidfile_taken(os.path.join(directory, 'lockfile.pid'),
force=force, verbose=verbose):
sys.exit(0)
donefile = os.path.join(directory, 'done.txt')
if os.path.isfile(donefile):
with open(donefile) as f:
msg = f.read()
if redo or force:
if verbose:
print('Removing %s %s' % (donefile, msg))
os.remove(donefile)
else:
if verbose:
print('%s %s' % (donefile, msg))
sys.exit(0)
def mark_job_done(directory):
with open(os.path.join(directory, 'done.txt'), 'w') as f:
f.write('done by %d@%s %s at %s' %
(os.getpid(), socket.gethostname(),
os.getenv('STY', ''),
time.strftime('%c')))
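# Typical pattern (the directory name is hypothetical):
#
#   exit_if_job_done('job_423')   # exits early if locked or already done
#   ... run the job ...
#   mark_job_done('job_423')      # writes done.txt so other workers skip it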
def pidfile_taken(path, verbose=False, force=False):
'''
Usage. To grab an exclusive lock for the remaining duration of the
current process (and exit if another process already has the lock),
do this:
if pidfile_taken('job_423/lockfile.pid', verbose=True):
sys.exit(0)
To do a batch of jobs, just run a script that does them all on
each available machine, sharing a network filesystem. When each
job grabs a lock, then this will automatically distribute the
jobs so that each one is done just once on one machine.
'''
# Try to create the file exclusively and write my pid into it.
try:
os.makedirs(os.path.dirname(path), exist_ok=True)
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
except OSError as e:
if e.errno == errno.EEXIST:
# If we cannot because there was a race, yield the conflicter.
conflicter = 'race'
try:
with open(path, 'r') as lockfile:
conflicter = lockfile.read().strip() or 'empty'
except:
pass
# Force is for manual one-time use, for deleting stale lockfiles.
if force:
if verbose:
print('Removing %s from %s' % (path, conflicter))
os.remove(path)
return pidfile_taken(path, verbose=verbose, force=False)
if verbose:
print('%s held by %s' % (path, conflicter))
return conflicter
else:
# Other problems get an exception.
raise
# Register to delete this file on exit.
lockfile = os.fdopen(fd, 'r+')
atexit.register(delete_pidfile, lockfile, path)
# Write my pid into the open file.
lockfile.write('%d@%s %s\n' % (os.getpid(), socket.gethostname(),
os.getenv('STY', '')))
lockfile.flush()
os.fsync(lockfile)
# Return 'None' to say there was not a conflict.
return None
def delete_pidfile(lockfile, path):
'''
Runs at exit after pidfile_taken succeeds.
'''
if lockfile is not None:
try:
lockfile.close()
except:
pass
try:
os.unlink(path)
except:
pass
``` |
{
"source": "JINGFFF/cmssw",
"score": 2
} |
#### File: DataProcessing/python/Merge.py
```python
from FWCore.ParameterSet.Config import Process, EndPath
from FWCore.ParameterSet.Modules import OutputModule, Source, Service
import FWCore.ParameterSet.Types as CfgTypes
def mergeProcess(*inputFiles, **options):
"""
_mergeProcess_
Creates and returns a merge process that will merge the provided
filenames
supported options:
- process_name : name of the process, defaults to Merge
- outputmod_label : label of the output module, defaults to Merged
- newDQMIO : specifies if the new DQM format should be used to merge the files
- output_file : sets the output file name
- output_lfn : sets the output LFN
- drop_dqm : drops DQM (EDMtoMEConverter) products from the input (PoolSource case only)
- mergeNANO : merges NanoAOD files using the NanoAODOutputModule
- bypassVersionCheck : bypasses the version check in case merging happened in a lower version of CMSSW (e.g. the UL HLT case). This is True by default.
"""
# //
# // process supported options
#//
processName = options.get("process_name", "Merge")
outputModLabel = options.get("outputmod_label", "Merged")
outputFilename = options.get("output_file", "Merged.root")
outputLFN = options.get("output_lfn", None)
dropDQM = options.get("drop_dqm", False)
newDQMIO = options.get("newDQMIO", False)
mergeNANO = options.get("mergeNANO", False)
bypassVersionCheck = options.get("bypassVersionCheck", True)
# //
# // build process
#//
process = Process(processName)
# //
# // input source
#//
if newDQMIO:
process.source = Source("DQMRootSource")
process.add_(Service("DQMStore"))
else:
process.source = Source("PoolSource")
if dropDQM:
process.source.inputCommands = CfgTypes.untracked.vstring('keep *','drop *_EDMtoMEConverter_*_*')
process.source.fileNames = CfgTypes.untracked(CfgTypes.vstring())
for entry in inputFiles:
process.source.fileNames.append(str(entry))
# //
# // output module
#//
if newDQMIO:
outMod = OutputModule("DQMRootOutputModule")
elif mergeNANO:
import Configuration.EventContent.EventContent_cff
outMod = OutputModule("NanoAODOutputModule",Configuration.EventContent.EventContent_cff.NANOAODEventContent.clone())
process.add_(Service("InitRootHandlers", EnableIMT = CfgTypes.untracked.bool(False)))
else:
outMod = OutputModule("PoolOutputModule")
# To bypass the version check in the merge process (TRUE by default)
process.source.bypassVersionCheck = CfgTypes.untracked.bool(bypassVersionCheck)
outMod.fileName = CfgTypes.untracked.string(outputFilename)
if outputLFN is not None:
outMod.logicalFileName = CfgTypes.untracked.string(outputLFN)
setattr(process, outputModLabel, outMod)
process.outputPath = EndPath(outMod)
return process
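# Usage sketch (file names are hypothetical):
#
#   process = mergeProcess(
#       "file:step1.root", "file:step2.root",
#       process_name="Merge",
#       output_file="Merged.root",
#       newDQMIO=False,
#   )
#
# The returned Process object can then be dumped or executed by the caller.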
``` |
{
"source": "jinggaizi/test-2",
"score": 2
} |
#### File: asr/rnnt_decoder/transducer_transformer_decoder.py
```python
from typing import Any
from typing import List
from typing import Sequence
from typing import Tuple
import six
import torch
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transducer.transformer_decoder_layer import DecoderLayer
from espnet.nets.pytorch_backend.transformer.dynamic_conv import DynamicConvolution
from espnet.nets.pytorch_backend.transformer.dynamic_conv2d import DynamicConvolution2D
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.lightconv import LightweightConvolution
from espnet.nets.pytorch_backend.transformer.lightconv2d import LightweightConvolution2D
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask_limit
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet2.asr.rnnt_decoder.abs_rnnt_decoder import AbsRNNTDecoder
from espnet.nets.pytorch_backend.nets_utils import to_device
from espnet.nets.pytorch_backend.nets_utils import get_activation
import logging
from espnet.nets.pytorch_backend.transducer.utils import check_state
from espnet.nets.pytorch_backend.transducer.joint_network import JointNetwork
class BaseTransducerTransformerDecoder(AbsRNNTDecoder, BatchScorerInterface):
"""Base class of Transducer Transfomer decoder module.
Args:
vocab_size: output dim
encoder_output_size: dimension of attention
attention_heads: the number of heads of multi head attention
linear_units: the number of units of position-wise feed forward
num_blocks: the number of decoder blocks
dropout_rate: dropout rate
self_attention_dropout_rate: dropout rate for attention
input_layer: input layer type
use_output_layer: whether to use output layer
pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
normalize_before: whether to use layer_norm before the first block
concat_after: whether to concat attention layer's input and output
if True, additional linear will be applied.
i.e. x -> x + linear(concat(x, att(x)))
if False, no additional linear will be applied.
i.e. x -> x + att(x)
"""
def __init__(
self,
vocab_size: int,
encoder_output_size: int,
dropout_rate: float = 0.1,
positional_dropout_rate: float = 0.1,
input_layer: str = "embed",
use_output_layer: bool = True,
pos_enc_class=PositionalEncoding,
normalize_before: bool = True,
blank: int = 0,
joint_activation_type="tanh",
):
assert check_argument_types()
super().__init__()
attention_dim = encoder_output_size
if input_layer == "embed":
self.embed = torch.nn.Sequential(
torch.nn.Embedding(vocab_size, attention_dim),
pos_enc_class(attention_dim, positional_dropout_rate),
)
elif input_layer == "linear":
self.embed = torch.nn.Sequential(
torch.nn.Linear(vocab_size, attention_dim),
torch.nn.LayerNorm(attention_dim),
torch.nn.Dropout(dropout_rate),
torch.nn.ReLU(),
pos_enc_class(attention_dim, positional_dropout_rate),
)
else:
raise ValueError(f"only 'embed' or 'linear' is supported: {input_layer}")
self.normalize_before = normalize_before
if self.normalize_before:
self.after_norm = LayerNorm(attention_dim)
self.blank = blank
self.odim = vocab_size
# Must be set by the inheriting class
self.decoders = None
self.joint_network = None
def forward(
self,
hs_pad: torch.Tensor,
ys_in_pad: torch.Tensor,
ys_mask: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Forward decoder.
Args:
hs_pad: encoded memory, float32 (batch, maxlen_in, feat)
ys_in_pad:
input token ids, int64 (batch, maxlen_out)
if input_layer == "embed"
input tensor (batch, maxlen_out, #mels) in the other cases
ys_mask: (batch, maxlen_out)
Returns:
(tuple): tuple containing:
z: joint network output scores over the lattice (batch, maxlen_in, maxlen_out, vocab_size)
olens: (batch, )
"""
tgt = self.embed(ys_in_pad)
tgt, tgt_mask = self.decoders(
tgt, ys_mask
)
if self.normalize_before:
tgt = self.after_norm(tgt)
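# Broadcast encoder frames (B, T, 1, D) against decoder states (B, 1, U, D)
# so the joint network scores every (time frame, label position) pair at once.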
h_enc = hs_pad.unsqueeze(2)
h_dec = tgt.unsqueeze(1)
z = self.joint_network(h_enc, h_dec)
olens = tgt_mask.sum(1)
return z, olens
def forward_one_step(
self,
tgt: torch.Tensor,
tgt_mask: torch.Tensor,
cache: List[torch.Tensor] = None,
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
"""Forward one step.
Args:
tgt: input token ids, int64 (batch, maxlen_out)
tgt_mask: input token mask, (batch, maxlen_out)
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
cache: cached output list of (batch, max_time_out-1, size)
Returns:
y, cache: NN output value and cache per `self.decoders`.
`y` has shape (batch, attention_dim): the decoder output at the last position.
"""
x = self.embed(tgt)
if cache is None:
cache = [None] * len(self.decoders)
new_cache = []
for c, decoder in zip(cache, self.decoders):
x, tgt_mask = decoder(x, tgt_mask, c)
new_cache.append(x)
if self.normalize_before:
y = self.after_norm(x[:, -1])
else:
y = x[:, -1]
return y, new_cache
def score(self, ys, state, x):
"""Score."""
#tgt_mask = to_device(self, subsequent_mask(len(ys)).unsqueeze(0))
tgt_mask = subsequent_mask(len(ys), device=x.device).unsqueeze(0)
state = check_state(state, (ys.unsqueeze(0).size(1) - 1), self.blank)
logp, state = self.forward_one_step(ys.unsqueeze(0), tgt_mask, cache=state)
logp = torch.log_softmax(self.joint_network(x, logp[0]), dim=-1)
return logp.squeeze(0), state
def batch_score(
self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
) -> Tuple[torch.Tensor, List[Any]]:
"""Score new token batch.
Args:
ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
states (List[Any]): Scorer states for prefix tokens.
xs (torch.Tensor):
The encoder feature that generates ys (n_batch, xlen, n_feat).
Returns:
tuple[torch.Tensor, List[Any]]: Tuple of
batchfied scores for next token with shape of `(n_batch, n_vocab)`
and next state list for ys.
"""
# merge states
n_batch = len(ys)
n_layers = len(self.decoders)
if states[0] is None:
batch_state = None
else:
# transpose state of [batch, layer] into [layer, batch]
batch_state = [
torch.stack([states[b][i] for b in range(n_batch)])
for i in range(n_layers)
]
# batch decoding
ys_mask = subsequent_mask(ys.size(-1), device=xs.device).unsqueeze(0)
logp, states = self.forward_one_step(ys, ys_mask, cache=batch_state)
# logp = torch.log_softmax(self.joint(xs, logp), dim=-1)
# transpose state of [layer, batch] into [batch, layer]
state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]
return logp, state_list
def recognize(self, h, recog_args, target_left_mask=-1):
"""Greedy search implementation for transformer-transducer.
Args:
h (torch.Tensor): encoder hidden state sequences (maxlen_in, Henc)
recog_args (Namespace): argument Namespace containing options
Returns:
hyp (list of dicts): 1-best decoding results
"""
hyp = {"score": 0.0, "yseq": [self.blank]}
ys = to_device(self, torch.tensor(hyp["yseq"], dtype=torch.long)).unsqueeze(0)
if target_left_mask > -1:
ys_mask = to_device(self, subsequent_mask_limit(1, target_left_mask).unsqueeze(0))
else:
ys_mask = to_device(self, subsequent_mask(1).unsqueeze(0))
y, c = self.forward_one_step(ys, ys_mask, None)
for i, hi in enumerate(h):
ytu = torch.log_softmax(self.joint_network(hi, y[0]), dim=0)
logp, pred = torch.max(ytu, dim=0)
if pred != self.blank:
hyp["yseq"].append(int(pred))
hyp["score"] += float(logp)
ys = to_device(self, torch.tensor(hyp["yseq"]).unsqueeze(0))
if target_left_mask > -1:
ys_mask = to_device(
self, subsequent_mask_limit(len(hyp["yseq"]), target_left_mask).unsqueeze(0)
)
else:
ys_mask = to_device(
self, subsequent_mask(len(hyp["yseq"])).unsqueeze(0)
)
y, c = self.forward_one_step(ys, ys_mask, c)
return [hyp]
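# Greedy decoding sketch (names are hypothetical): given encoder output h of
# shape (T, D_enc) and a recog_args namespace,
#   nbest = decoder.recognize(h, recog_args)
# returns a single-element list whose "yseq" starts with the blank symbol id.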

    def recognize_beam(self, h, recog_args, rnnlm=None, target_left_mask=-1):
        """Beam search implementation for transformer-transducer.

        Args:
            h (torch.Tensor): encoder hidden state sequences (maxlen_in, Henc)
            recog_args (Namespace): argument Namespace containing options
            rnnlm (torch.nn.Module): language model module
        Returns:
            nbest_hyps (list of dicts): n-best decoding results
        """
        beam = recog_args.beam_size
        k_range = min(beam, self.odim)
        nbest = recog_args.nbest
        normscore = recog_args.score_norm_transducer

        if rnnlm:
            kept_hyps = [
                {"score": 0.0, "yseq": [self.blank], "cache": None, "lm_state": None}
            ]
        else:
            kept_hyps = [{"score": 0.0, "yseq": [self.blank], "cache": None}]

        for i, hi in enumerate(h):
            hyps = kept_hyps
            kept_hyps = []

            # Repeatedly expand the best pending hypothesis: blank emissions are
            # kept for this frame, non-blank emissions re-enter the expansion queue.
            while True:
                new_hyp = max(hyps, key=lambda x: x["score"])
                hyps.remove(new_hyp)

                ys = to_device(self, torch.tensor(new_hyp["yseq"]).unsqueeze(0))
                if target_left_mask > -1:
                    ys_mask = to_device(
                        self, subsequent_mask_limit(len(new_hyp["yseq"]), target_left_mask).unsqueeze(0)
                    )
                else:
                    ys_mask = to_device(
                        self, subsequent_mask(len(new_hyp["yseq"])).unsqueeze(0)
                    )
                y, c = self.forward_one_step(ys, ys_mask, new_hyp["cache"])

                ytu = torch.log_softmax(self.joint_network(hi, y[0]), dim=0)

                if rnnlm:
                    rnnlm_state, rnnlm_scores = rnnlm.predict(
                        new_hyp["lm_state"], ys[:, -1]
                    )

                # Score every output token (including blank) for the popped hypothesis.
                for k in six.moves.range(self.odim):
                    beam_hyp = {
                        "score": new_hyp["score"] + float(ytu[k]),
                        "yseq": new_hyp["yseq"][:],
                        "cache": new_hyp["cache"],
                    }

                    if rnnlm:
                        beam_hyp["lm_state"] = new_hyp["lm_state"]

                    if k == self.blank:
                        kept_hyps.append(beam_hyp)
                    else:
                        beam_hyp["yseq"].append(int(k))
                        beam_hyp["cache"] = c

                        if rnnlm:
                            beam_hyp["lm_state"] = rnnlm_state
                            beam_hyp["score"] += (
                                recog_args.lm_weight * rnnlm_scores[0][k]
                            )
                        hyps.append(beam_hyp)

                if len(kept_hyps) >= k_range:
                    break

        if normscore:
            nbest_hyps = sorted(
                kept_hyps, key=lambda x: x["score"] / len(x["yseq"]), reverse=True
            )[:nbest]
        else:
            nbest_hyps = sorted(kept_hyps, key=lambda x: x["score"], reverse=True)[
                :nbest
            ]

        return nbest_hyps


class TransducerTransformerDecoder(BaseTransducerTransformerDecoder):
    def __init__(
        self,
        jdim: int,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        blank: int = 0,
        joint_activation_type="tanh",
    ):
        assert check_argument_types()
        super().__init__(
            vocab_size=vocab_size,
            encoder_output_size=encoder_output_size,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            input_layer=input_layer,
            use_output_layer=use_output_layer,
            pos_enc_class=pos_enc_class,
            normalize_before=normalize_before,
            blank=blank,
            joint_activation_type=joint_activation_type,
        )
        attention_dim = encoder_output_size

        self.decoders = repeat(
            num_blocks,
            lambda lnum: DecoderLayer(
                attention_dim,
                MultiHeadedAttention(
                    attention_heads, attention_dim, self_attention_dropout_rate
                ),
                PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        self.joint_network = JointNetwork(
            vocab_size, attention_dim, attention_dim, jdim, joint_activation_type
        )
```
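
A minimal usage sketch for the decoder above, assuming the surrounding ESPnet2 imports resolve at runtime; every dimension, the dummy encoder output, and the `Namespace` fields below are illustrative placeholders rather than values taken from an original recipe.

```python
# Hypothetical usage sketch (not from the original repository).
from argparse import Namespace

import torch

# Dummy encoder output: 50 frames of 256-dim features (illustrative).
enc_out = torch.randn(50, 256)

decoder = TransducerTransformerDecoder(
    jdim=256,                 # joint space size (illustrative)
    vocab_size=500,           # output token inventory (illustrative)
    encoder_output_size=256,  # must match the encoder feature dimension
    attention_heads=4,
    linear_units=1024,
    num_blocks=2,
)
decoder.eval()

# Only the fields read by the search routines are set here (illustrative values).
recog_args = Namespace(beam_size=5, nbest=1, score_norm_transducer=True)

with torch.no_grad():
    # Greedy (1-best) decoding over the dummy encoder frames.
    hyps = decoder.recognize(enc_out, recog_args)
    # Beam search would return an n-best list instead:
    # nbest_hyps = decoder.recognize_beam(enc_out, recog_args)

print(hyps[0]["yseq"])  # token id sequence, starting with the blank symbol
```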
#### File: pytorch_backend/transducer/joint_network.py
```python
import torch

from typing import Optional

from espnet.nets.pytorch_backend.nets_utils import get_activation


class JointNetwork(torch.nn.Module):
    """Transducer joint network module.

    Args:
        vocab_size: Output dimension (number of output tokens)
        encoder_output_size: Dimension of encoder outputs
        hidden_size: Dimension of decoder (prediction network) outputs
        joint_space_size: Dimension of joint space
        joint_activation_type: Activation type for joint network
        joint_memory_reduction: Whether to compute the joint output per utterance
            to reduce peak memory usage
    """

    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        hidden_size: int,
        joint_space_size: int,
        joint_activation_type: str,
        joint_memory_reduction: bool = False,
    ):
        """Joint network initializer."""
        super().__init__()

        self.lin_enc = torch.nn.Linear(encoder_output_size, joint_space_size)
        self.lin_dec = torch.nn.Linear(hidden_size, joint_space_size, bias=False)
        self.lin_out = torch.nn.Linear(joint_space_size, vocab_size)

        self.joint_activation = get_activation(joint_activation_type)

        self.joint_memory_reduction = joint_memory_reduction
        self.joint_dim = joint_space_size

    def forward(
        self,
        h_enc: torch.Tensor,
        h_dec: torch.Tensor,
        pred_len: Optional[torch.Tensor] = None,
        target_len: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Joint computation of z.

        Args:
            h_enc: Batch of expanded hidden state (B, T, 1, D_enc)
            h_dec: Batch of expanded hidden state (B, 1, U, D_dec)
            pred_len: Encoder output lengths (B,), only used when
                joint_memory_reduction is enabled
            target_len: Target sequence lengths (B,), only used when
                joint_memory_reduction is enabled
        Returns:
            z: Output (B, T, U, vocab_size), or a flattened
                (sum_b T_b * (U_b + 1), vocab_size) tensor when the
                memory-reduced path is taken
        """
        if self.joint_memory_reduction and pred_len is not None:
            batch = h_dec.size(0)

            # Memory-reduced path: compute the joint output per utterance over the
            # valid (T_b, U_b + 1) region only and store it flattened, instead of
            # materializing the full padded (B, T_max, U_max, joint_dim) tensor.
            z = h_dec.new_zeros((sum(pred_len * (target_len + 1)), self.joint_dim))
            _start = 0
            for b in range(batch):
                t = int(pred_len[b])
                u_1 = int(target_len[b]) + 1
                t_u = t * u_1

                z[_start : (_start + t_u), :] = self.joint_activation(
                    self.lin_enc(h_enc[b][:t, :, :])
                    + self.lin_dec(h_dec[b][:, :u_1, :])
                ).view(t_u, -1)
                _start += t_u
        else:
            z = self.joint_activation(self.lin_enc(h_enc) + self.lin_dec(h_dec))

        z = self.lin_out(z)

        return z
```
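
A small sketch of the broadcasting that `JointNetwork.forward` relies on; the batch size, lengths, and feature dimensions below are made-up illustration values, and only PyTorch plus the class above are assumed.

```python
# Illustrative shapes only; B, T, U and the sizes below are arbitrary.
import torch

B, T, U, D_enc, D_dec, joint_dim, vocab = 2, 30, 7, 256, 256, 320, 500

joint = JointNetwork(
    vocab_size=vocab,
    encoder_output_size=D_enc,
    hidden_size=D_dec,
    joint_space_size=joint_dim,
    joint_activation_type="tanh",
    joint_memory_reduction=False,  # use the plain broadcast path
)

# Encoder states carry a singleton U axis and decoder states a singleton T axis,
# so the addition inside forward() broadcasts to (B, T, U + 1, joint_dim).
h_enc = torch.randn(B, T, 1, D_enc)
h_dec = torch.randn(B, 1, U + 1, D_dec)  # U + 1 rows: targets plus the leading blank

z = joint(h_enc, h_dec)
print(z.shape)  # torch.Size([2, 30, 8, 500])
```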